diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/.changelog/unreleased/breaking-changes/211-deprecate-tmhome.md b/.changelog/unreleased/breaking-changes/211-deprecate-tmhome.md deleted file mode 100644 index 547fc4e2d54..00000000000 --- a/.changelog/unreleased/breaking-changes/211-deprecate-tmhome.md +++ /dev/null @@ -1,2 +0,0 @@ -- The `TMHOME` environment variable was renamed to `CMTHOME`, and all environment variables starting with `TM_` are instead prefixed with `CMT_` - ([\#211](https://github.com/cometbft/cometbft/issues/211)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/260-remove-priority-mempool.md b/.changelog/unreleased/breaking-changes/260-remove-priority-mempool.md deleted file mode 100644 index 5800158653e..00000000000 --- a/.changelog/unreleased/breaking-changes/260-remove-priority-mempool.md +++ /dev/null @@ -1,6 +0,0 @@ -- [mempool] Remove priority mempool. - ([\#260](https://github.com/cometbft/cometbft/issues/260)) -- [config] Remove `Version` field from `MempoolConfig`. - ([\#260](https://github.com/cometbft/cometbft/issues/260)) -- [protobuf] Remove fields `sender`, `priority`, and `mempool_error` from - `ResponseCheckTx`. ([\#260](https://github.com/cometbft/cometbft/issues/260)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/385-update-to-go1.20.md b/.changelog/unreleased/breaking-changes/385-update-to-go1.20.md deleted file mode 100644 index 5e9ea3386ac..00000000000 --- a/.changelog/unreleased/breaking-changes/385-update-to-go1.20.md +++ /dev/null @@ -1,2 +0,0 @@ -- Bump minimum Go version to 1.20 - ([\#385](https://github.com/cometbft/cometbft/issues/385)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/6498-signerharness-set-home-dir.md b/.changelog/unreleased/breaking-changes/6498-signerharness-set-home-dir.md deleted file mode 100644 index 19beb3c9b38..00000000000 --- a/.changelog/unreleased/breaking-changes/6498-signerharness-set-home-dir.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[tools/tm-signer-harness]` Set OS home dir to instead of the hardcoded PATH. - ([\#6498](https://github.com/tendermint/tendermint/pull/6498)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/386-quick-fix-needproofblock.md b/.changelog/unreleased/bug-fixes/386-quick-fix-needproofblock.md deleted file mode 100644 index 2180086ce97..00000000000 --- a/.changelog/unreleased/bug-fixes/386-quick-fix-needproofblock.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` ([\#386](https://github.com/cometbft/cometbft/pull/386)) Short-term fix for the case when `needProofBlock` cannot find previous block meta by defaulting to the creation of a new proof block. (@adizere) - - Special thanks to the [Vega.xyz](https://vega.xyz/) team, and in particular to Zohar (@ze97286), for reporting the problem and working with us to get to a fix. diff --git a/.changelog/unreleased/bug-fixes/4-busy-loop-send-block-part.md b/.changelog/unreleased/bug-fixes/4-busy-loop-send-block-part.md deleted file mode 100644 index 59bda2afc3e..00000000000 --- a/.changelog/unreleased/bug-fixes/4-busy-loop-send-block-part.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Fixed a busy loop that happened when sending of a block part failed by sleeping in case of error. 
- ([\#4](https://github.com/informalsystems/tendermint/pull/4)) \ No newline at end of file diff --git a/.changelog/unreleased/dependencies/4059-update-cometbft-db.md b/.changelog/unreleased/dependencies/4059-update-cometbft-db.md new file mode 100644 index 00000000000..22900e5e765 --- /dev/null +++ b/.changelog/unreleased/dependencies/4059-update-cometbft-db.md @@ -0,0 +1,4 @@ +- `[go/runtime]` Bump Go version to 1.22 + ([\#4073](https://github.com/cometbft/cometbft/pull/4073)) +- Bump cometbft-db version to v0.12.0 + ([\#4073](https://github.com/cometbft/cometbft/pull/4073)) diff --git a/.changelog/unreleased/features/9680-config-introduce-bootstrappeers.md b/.changelog/unreleased/features/9680-config-introduce-bootstrappeers.md deleted file mode 100644 index 538429f3a24..00000000000 --- a/.changelog/unreleased/features/9680-config-introduce-bootstrappeers.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[config]` Introduce `BootstrapPeers` to the config to allow - nodes to list peers to be added to the addressbook upon start up. - ([\#9680](https://github.com/tendermint/tendermint/pull/9680)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/136-remove-tm-signer-harness.md b/.changelog/unreleased/improvements/136-remove-tm-signer-harness.md deleted file mode 100644 index 6eb6c2158c2..00000000000 --- a/.changelog/unreleased/improvements/136-remove-tm-signer-harness.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[tools/tm-signer-harness]` Remove the folder as it is unused - ([\#136](https://github.com/cometbft/cometbft/issues/136)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/56-rpc-cache-rpc-responses.md b/.changelog/unreleased/improvements/56-rpc-cache-rpc-responses.md deleted file mode 100644 index 344b3df93b9..00000000000 --- a/.changelog/unreleased/improvements/56-rpc-cache-rpc-responses.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[e2e]` Add functionality for uncoordinated (minor) upgrades - ([\#56](https://github.com/tendermint/tendermint/pull/56)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/9650-rpc-cache-rpc-responses.md b/.changelog/unreleased/improvements/9650-rpc-cache-rpc-responses.md deleted file mode 100644 index 3b6b5034726..00000000000 --- a/.changelog/unreleased/improvements/9650-rpc-cache-rpc-responses.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[rpc]` Enable caching of RPC responses - ([\#9650](https://github.com/tendermint/tendermint/pull/9650)) \ No newline at end of file diff --git a/.changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode b/.changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode.md similarity index 100% rename from .changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode rename to .changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode.md diff --git a/.changelog/v0.38.0/breaking-changes/1057-bootstrap-state-api.md b/.changelog/v0.38.0/breaking-changes/1057-bootstrap-state-api.md new file mode 100644 index 00000000000..dec9de27bfa --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/1057-bootstrap-state-api.md @@ -0,0 +1,4 @@ +- `[node/state]` Add Go API to bootstrap block store and state store to a height. Make sure block sync starts syncing from the bootstrapped height. + ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang) +- `[state/store]` Added Go functions to save the height at which offline state sync is performed.
+ ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@jmalicevic) diff --git a/.changelog/v0.38.0/breaking-changes/1113-rm-upnp.md b/.changelog/v0.38.0/breaking-changes/1113-rm-upnp.md new file mode 100644 index 00000000000..bb95f20c082 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/1113-rm-upnp.md @@ -0,0 +1,2 @@ +- `[p2p]` Remove UPnP functionality + ([\#1113](https://github.com/cometbft/cometbft/issues/1113)) \ No newline at end of file diff --git a/.changelog/v0.38.0/breaking-changes/1120-node-api-cleanup.md b/.changelog/v0.38.0/breaking-changes/1120-node-api-cleanup.md new file mode 100644 index 00000000000..8c049370997 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/1120-node-api-cleanup.md @@ -0,0 +1,3 @@ +- `[node]` Removed `ConsensusState()` accessor from `Node` + struct - all access to consensus state should go via the reactor + ([\#1120](https://github.com/cometbft/cometbft/pull/1120)) diff --git a/.changelog/v0.38.0/breaking-changes/1270-executor_extend_vote.md b/.changelog/v0.38.0/breaking-changes/1270-executor_extend_vote.md new file mode 100644 index 00000000000..975c8f8232e --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/1270-executor_extend_vote.md @@ -0,0 +1,3 @@ +- `[state]` Signature of `ExtendVote` changed in `BlockExecutor`. + It now includes the block whose precommit will be extended, and the state object. + ([\#1270](https://github.com/cometbft/cometbft/pull/1270)) diff --git a/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-config.md b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-config.md new file mode 100644 index 00000000000..c989879b9e4 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-config.md @@ -0,0 +1,2 @@ +- `[config]` Remove `Version` field from `MempoolConfig`. + ([\#260](https://github.com/cometbft/cometbft/issues/260)) diff --git a/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-proto.md b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-proto.md new file mode 100644 index 00000000000..042001178b3 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool-proto.md @@ -0,0 +1,2 @@ +- `[protobuf]` Remove fields `sender`, `priority`, and `mempool_error` from + `ResponseCheckTx`. ([\#260](https://github.com/cometbft/cometbft/issues/260)) diff --git a/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool.md b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool.md new file mode 100644 index 00000000000..e76a567afeb --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/260-remove-priority-mempool.md @@ -0,0 +1,2 @@ +- `[mempool]` Remove priority mempool. + ([\#260](https://github.com/cometbft/cometbft/issues/260)) diff --git a/.changelog/v0.38.0/breaking-changes/558-tm10011.md b/.changelog/v0.38.0/breaking-changes/558-tm10011.md new file mode 100644 index 00000000000..d1b9fca4aba --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/558-tm10011.md @@ -0,0 +1,2 @@ +- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root).
`Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic + ([\#558](https://github.com/cometbft/cometbft/issues/558)) diff --git a/.changelog/unreleased/breaking-changes/6541-state-move-pruneblocks-execution.md b/.changelog/v0.38.0/breaking-changes/6541-state-move-pruneblocks-execution.md similarity index 100% rename from .changelog/unreleased/breaking-changes/6541-state-move-pruneblocks-execution.md rename to .changelog/v0.38.0/breaking-changes/6541-state-move-pruneblocks-execution.md diff --git a/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type copy.md b/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type copy.md new file mode 100644 index 00000000000..4e8fcb82295 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type copy.md @@ -0,0 +1,3 @@ +- `[rpc]` Removed `begin_block_events` and `end_block_events` from `BlockResultsResponse`. + The events are merged into one field called `finalize_block_events`. + ([\#9427](https://github.com/tendermint/tendermint/issues/9427)) diff --git a/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type.md b/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type.md new file mode 100644 index 00000000000..22a3b9fc5a3 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/774-state-indexerevent-remove-function-type.md @@ -0,0 +1,3 @@ +- `[state/kvindexer]` Remove the function type from the event key stored in the database. This should be breaking only +for people who forked CometBFT and interact directly with the indexer's kvstore. + ([\#774](https://github.com/cometbft/cometbft/pull/774)) \ No newline at end of file diff --git a/.changelog/v0.38.0/breaking-changes/797-kvindexer-support-for-big-numbers.md b/.changelog/v0.38.0/breaking-changes/797-kvindexer-support-for-big-numbers.md new file mode 100644 index 00000000000..e64e2775d34 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/797-kvindexer-support-for-big-numbers.md @@ -0,0 +1,3 @@ +- `[kvindexer]` Added support for big integers and big floats in the kvindexer. + Breaking changes: function `Number` in package `libs/pubsub/query/syntax` changed its return value. + ([\#797](https://github.com/cometbft/cometbft/pull/797)) diff --git a/.changelog/v0.38.0/breaking-changes/797-pubsub-support-for-big-numbers.md b/.changelog/v0.38.0/breaking-changes/797-pubsub-support-for-big-numbers.md new file mode 100644 index 00000000000..33778282eaf --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/797-pubsub-support-for-big-numbers.md @@ -0,0 +1,3 @@ +- `[pubsub]` Added support for big integers and big floats in the pubsub event query system. + Breaking changes: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
+ ([\#797](https://github.com/cometbft/cometbft/pull/797)) diff --git a/.changelog/unreleased/breaking-changes/8664-move-app-hash-to-commit.md b/.changelog/v0.38.0/breaking-changes/8664-move-app-hash-to-commit.md similarity index 85% rename from .changelog/unreleased/breaking-changes/8664-move-app-hash-to-commit.md rename to .changelog/v0.38.0/breaking-changes/8664-move-app-hash-to-commit.md index cbc4ba25a18..a22b3926477 100644 --- a/.changelog/unreleased/breaking-changes/8664-move-app-hash-to-commit.md +++ b/.changelog/v0.38.0/breaking-changes/8664-move-app-hash-to-commit.md @@ -1,2 +1,2 @@ -- `[abci]` Move `app_hash` parameter from `Commit` to `FinalizeBlock` (@sergio-mena) - ([\#8664](https://github.com/tendermint/tendermint/pull/8664)) \ No newline at end of file +- `[abci]` Move `app_hash` parameter from `Commit` to `FinalizeBlock` + ([\#8664](https://github.com/tendermint/tendermint/pull/8664)) diff --git a/.changelog/unreleased/breaking-changes/9468-finalize-block.md b/.changelog/v0.38.0/breaking-changes/9468-finalize-block.md similarity index 65% rename from .changelog/unreleased/breaking-changes/9468-finalize-block.md rename to .changelog/v0.38.0/breaking-changes/9468-finalize-block.md index 7bf64790dc8..e27f65e55e5 100644 --- a/.changelog/unreleased/breaking-changes/9468-finalize-block.md +++ b/.changelog/v0.38.0/breaking-changes/9468-finalize-block.md @@ -1,2 +1,3 @@ -- `[abci]` Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx` and `EndBlock` into a single method call (@cmwaters) - ([\#9468](https://github.com/tendermint/tendermint/pull/9468)) \ No newline at end of file +- `[abci]` Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx` + and `EndBlock` into a single method call + ([\#9468](https://github.com/tendermint/tendermint/pull/9468)) diff --git a/.changelog/unreleased/breaking-changes/9625-p2p-remove-trust-package.md b/.changelog/v0.38.0/breaking-changes/9625-p2p-remove-trust-package.md similarity index 100% rename from .changelog/unreleased/breaking-changes/9625-p2p-remove-trust-package.md rename to .changelog/v0.38.0/breaking-changes/9625-p2p-remove-trust-package.md diff --git a/.changelog/unreleased/breaking-changes/9655-inspect-add-command.md b/.changelog/v0.38.0/breaking-changes/9655-inspect-add-command.md similarity index 100% rename from .changelog/unreleased/breaking-changes/9655-inspect-add-command.md rename to .changelog/v0.38.0/breaking-changes/9655-inspect-add-command.md diff --git a/.changelog/unreleased/breaking-changes/9655-node-move-DB-vars-config.md b/.changelog/v0.38.0/breaking-changes/9655-node-move-DB-vars-config.md similarity index 100% rename from .changelog/unreleased/breaking-changes/9655-node-move-DB-vars-config.md rename to .changelog/v0.38.0/breaking-changes/9655-node-move-DB-vars-config.md diff --git a/.changelog/unreleased/breaking-changes/9655-rpc-remove-environment-var.md b/.changelog/v0.38.0/breaking-changes/9655-rpc-remove-environment-var.md similarity index 100% rename from .changelog/unreleased/breaking-changes/9655-rpc-remove-environment-var.md rename to .changelog/v0.38.0/breaking-changes/9655-rpc-remove-environment-var.md diff --git a/.changelog/unreleased/breaking-changes/9682-metrics-refactor-state-block-synching.md b/.changelog/v0.38.0/breaking-changes/9682-metrics-refactor-state-block-synching.md similarity index 100% rename from .changelog/unreleased/breaking-changes/9682-metrics-refactor-state-block-synching.md rename to 
.changelog/v0.38.0/breaking-changes/9682-metrics-refactor-state-block-synching.md diff --git a/.changelog/v0.38.0/breaking-changes/980-max-size-more-control.md b/.changelog/v0.38.0/breaking-changes/980-max-size-more-control.md new file mode 100644 index 00000000000..e4354e3cb05 --- /dev/null +++ b/.changelog/v0.38.0/breaking-changes/980-max-size-more-control.md @@ -0,0 +1,9 @@ +- `[mempool]` Application can now set `ConsensusParams.Block.MaxBytes` to -1 + to have visibility on all transactions in the + mempool at `PrepareProposal` time. + This means that the total size of transactions sent via `RequestPrepareProposal` + might exceed `RequestPrepareProposal.max_tx_bytes`. + If that is the case, the application MUST make sure that the total size of transactions + returned in `ResponsePrepareProposal.txs` does not exceed `RequestPrepareProposal.max_tx_bytes`, + otherwise CometBFT will panic. + ([\#980](https://github.com/cometbft/cometbft/issues/980)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/423-forwardport-default-kvindexer-behaviour.md b/.changelog/v0.38.0/bug-fixes/423-forwardport-default-kvindexer-behaviour.md similarity index 100% rename from .changelog/unreleased/bug-fixes/423-forwardport-default-kvindexer-behaviour.md rename to .changelog/v0.38.0/bug-fixes/423-forwardport-default-kvindexer-behaviour.md diff --git a/.changelog/unreleased/bug-fixes/496-error-on-applyblock-should-panic.md b/.changelog/v0.38.0/bug-fixes/496-error-on-applyblock-should-panic.md similarity index 100% rename from .changelog/unreleased/bug-fixes/496-error-on-applyblock-should-panic.md rename to .changelog/v0.38.0/bug-fixes/496-error-on-applyblock-should-panic.md diff --git a/.changelog/unreleased/bug-fixes/524-rename-peerstate-tojson.md b/.changelog/v0.38.0/bug-fixes/524-rename-peerstate-tojson.md similarity index 100% rename from .changelog/unreleased/bug-fixes/524-rename-peerstate-tojson.md rename to .changelog/v0.38.0/bug-fixes/524-rename-peerstate-tojson.md diff --git a/.changelog/v0.38.0/bug-fixes/575-fix-light-client-panic.md b/.changelog/v0.38.0/bug-fixes/575-fix-light-client-panic.md new file mode 100644 index 00000000000..0ec55b923fb --- /dev/null +++ b/.changelog/v0.38.0/bug-fixes/575-fix-light-client-panic.md @@ -0,0 +1,6 @@ +- `[light]` Fixed an edge case where a light client would panic when attempting + to query a node that (1) has started from a non-zero height and (2) does + not yet have any data. The light client will now, correctly, not panic + _and_ keep the node in its list of providers in the same way it would if + it queried a node starting from height zero that does not yet have data + ([\#575](https://github.com/cometbft/cometbft/issues/575)) \ No newline at end of file diff --git a/.changelog/v0.38.0/bug-fixes/855-snake-case-json-for-exec-tx-result-fields.md b/.changelog/v0.38.0/bug-fixes/855-snake-case-json-for-exec-tx-result-fields.md new file mode 100644 index 00000000000..27f5fe53512 --- /dev/null +++ b/.changelog/v0.38.0/bug-fixes/855-snake-case-json-for-exec-tx-result-fields.md @@ -0,0 +1,2 @@ +- `[abci]` Restore the snake_case naming in JSON serialization of + `ExecTxResult` ([\#855](https://github.com/cometbft/cometbft/issues/855)). 
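The `ConsensusParams.Block.MaxBytes = -1` entry above ([\#980]) places a hard obligation on the application: whatever it returns in `ResponsePrepareProposal.txs` must fit in `RequestPrepareProposal.max_tx_bytes`, or CometBFT panics. A minimal Go sketch of one way an application might honor that contract; the helper name and plain `[][]byte`/`int64` types are illustrative, not CometBFT's actual API:

```go
package main

import "fmt"

// trimToMaxTxBytes keeps the longest prefix of txs whose total size does not
// exceed maxTxBytes, mirroring the RequestPrepareProposal.max_tx_bytes /
// ResponsePrepareProposal.txs contract described in the entry above.
// The helper itself is hypothetical.
func trimToMaxTxBytes(txs [][]byte, maxTxBytes int64) [][]byte {
	var total int64
	kept := make([][]byte, 0, len(txs))
	for _, tx := range txs {
		if total+int64(len(tx)) > maxTxBytes {
			break // the first tx that would overflow the limit: stop here
		}
		total += int64(len(tx))
		kept = append(kept, tx)
	}
	return kept
}

func main() {
	txs := [][]byte{make([]byte, 400), make([]byte, 400), make([]byte, 400)}
	fmt.Println(len(trimToMaxTxBytes(txs, 1000))) // 2: keeps 800 of the 1000 allowed bytes
}
```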
diff --git a/.changelog/v0.38.0/bug-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.38.0/bug-fixes/865-fix-peerstate-marshaljson.md new file mode 100644 index 00000000000..318bda315c5 --- /dev/null +++ b/.changelog/v0.38.0/bug-fixes/865-fix-peerstate-marshaljson.md @@ -0,0 +1,2 @@ +- `[consensus]` Avoid recursive call after rename to (*PeerState).MarshalJSON + ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/v0.38.0/bug-fixes/890-mempool-fix-cache.md b/.changelog/v0.38.0/bug-fixes/890-mempool-fix-cache.md new file mode 100644 index 00000000000..78ade6f4c1c --- /dev/null +++ b/.changelog/v0.38.0/bug-fixes/890-mempool-fix-cache.md @@ -0,0 +1,2 @@ +- `[mempool/clist_mempool]` Prevent a transaction from appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/unreleased/bug-fixes/9462-docker-go-use-consistent-version.md b/.changelog/v0.38.0/bug-fixes/9462-docker-go-use-consistent-version.md similarity index 100% rename from .changelog/unreleased/bug-fixes/9462-docker-go-use-consistent-version.md rename to .changelog/v0.38.0/bug-fixes/9462-docker-go-use-consistent-version.md diff --git a/.changelog/unreleased/bug-fixes/9717-abci-cli-fix-help.md b/.changelog/v0.38.0/bug-fixes/9717-abci-cli-fix-help.md similarity index 100% rename from .changelog/unreleased/bug-fixes/9717-abci-cli-fix-help.md rename to .changelog/v0.38.0/bug-fixes/9717-abci-cli-fix-help.md diff --git a/.changelog/v0.38.0/deprecations/650-deprecate-grpc-broadcast-api.md b/.changelog/v0.38.0/deprecations/650-deprecate-grpc-broadcast-api.md new file mode 100644 index 00000000000..7ab623a6998 --- /dev/null +++ b/.changelog/v0.38.0/deprecations/650-deprecate-grpc-broadcast-api.md @@ -0,0 +1,4 @@ +- `[rpc/grpc]` Mark the gRPC broadcast API as deprecated. + It will be superseded by a broader API as part of + [\#81](https://github.com/cometbft/cometbft/issues/81) + ([\#650](https://github.com/cometbft/cometbft/issues/650)) \ No newline at end of file diff --git a/.changelog/v0.38.0/features/1057-bootstrap-state-api.md b/.changelog/v0.38.0/features/1057-bootstrap-state-api.md new file mode 100644 index 00000000000..ff3dcb6820a --- /dev/null +++ b/.changelog/v0.38.0/features/1057-bootstrap-state-api.md @@ -0,0 +1,2 @@ +- `[node/state]` Add Go API to bootstrap block store and state store to a height + ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang) \ No newline at end of file diff --git a/.changelog/v0.38.0/features/9830-proxy-introduce-newconnsynclocalclientcreator.md b/.changelog/v0.38.0/features/9830-proxy-introduce-newconnsynclocalclientcreator.md new file mode 100644 index 00000000000..a7c8a0f69fc --- /dev/null +++ b/.changelog/v0.38.0/features/9830-proxy-introduce-newconnsynclocalclientcreator.md @@ -0,0 +1,5 @@ +- `[proxy]` Introduce `NewConnSyncLocalClientCreator`, which allows local ABCI + clients to have the same concurrency model as remote clients (i.e. one mutex + per client "connection", for each of the four ABCI "connections").
+ ([tendermint/tendermint\#9830](https://github.com/tendermint/tendermint/pull/9830) + and [\#1145](https://github.com/cometbft/cometbft/pull/1145)) diff --git a/.changelog/unreleased/features/9830-proxy-introduce-newunsynclocalclientcreator.md b/.changelog/v0.38.0/features/9830-proxy-introduce-newunsynclocalclientcreator.md similarity index 100% rename from .changelog/unreleased/features/9830-proxy-introduce-newunsynclocalclientcreator.md rename to .changelog/v0.38.0/features/9830-proxy-introduce-newunsynclocalclientcreator.md diff --git a/.changelog/v0.38.0/features/9836-abci-add-vote-extension.md b/.changelog/v0.38.0/features/9836-abci-add-vote-extension.md new file mode 100644 index 00000000000..4d8df79c720 --- /dev/null +++ b/.changelog/v0.38.0/features/9836-abci-add-vote-extension.md @@ -0,0 +1 @@ +- `[abci]` New ABCI methods `VerifyVoteExtension` and `ExtendVote` allow validators to validate the vote extension data attached to a pre-commit message and allow applications to let their validators do more than just validate within consensus ([\#9836](https://github.com/tendermint/tendermint/pull/9836)) diff --git a/.changelog/v0.38.0/improvements/1210-close-evidence-db.md b/.changelog/v0.38.0/improvements/1210-close-evidence-db.md new file mode 100644 index 00000000000..e32bc87dbe1 --- /dev/null +++ b/.changelog/v0.38.0/improvements/1210-close-evidence-db.md @@ -0,0 +1 @@ +- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) diff --git a/.changelog/v0.38.0/improvements/1264-log-app-hash-as-hex.md b/.changelog/v0.38.0/improvements/1264-log-app-hash-as-hex.md new file mode 100644 index 00000000000..2e530c73895 --- /dev/null +++ b/.changelog/v0.38.0/improvements/1264-log-app-hash-as-hex.md @@ -0,0 +1,2 @@ +- `[state]` Make logging `block_app_hash` and `app_hash` consistent by logging them both as hex. + ([\#1264](https://github.com/cometbft/cometbft/pull/1264)) diff --git a/.changelog/unreleased/improvements/543-metrics-for-blocksync.md b/.changelog/v0.38.0/improvements/543-metrics-for-blocksync.md similarity index 100% rename from .changelog/unreleased/improvements/543-metrics-for-blocksync.md rename to .changelog/v0.38.0/improvements/543-metrics-for-blocksync.md diff --git a/.changelog/v0.38.0/improvements/638-json-rpc-error-message.md b/.changelog/v0.38.0/improvements/638-json-rpc-error-message.md new file mode 100644 index 00000000000..6922091fd25 --- /dev/null +++ b/.changelog/v0.38.0/improvements/638-json-rpc-error-message.md @@ -0,0 +1,3 @@ +- `[jsonrpc/client]` Improve the error message for client errors stemming from + bad HTTP responses. 
+ ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) diff --git a/.changelog/unreleased/improvements/6443-merkle-hashalternatives-perf-improv-a.md b/.changelog/v0.38.0/improvements/6443-merkle-hashalternatives-perf-improv-a.md similarity index 100% rename from .changelog/unreleased/improvements/6443-merkle-hashalternatives-perf-improv-a.md rename to .changelog/v0.38.0/improvements/6443-merkle-hashalternatives-perf-improv-a.md diff --git a/.changelog/unreleased/improvements/6509-pex-addrbook-perf-improv.md b/.changelog/v0.38.0/improvements/6509-pex-addrbook-perf-improv.md similarity index 100% rename from .changelog/unreleased/improvements/6509-pex-addrbook-perf-improv.md rename to .changelog/v0.38.0/improvements/6509-pex-addrbook-perf-improv.md diff --git a/.changelog/unreleased/improvements/6513-merkle-hashalternatives-perf-improv-b.md b/.changelog/v0.38.0/improvements/6513-merkle-hashalternatives-perf-improv-b.md similarity index 100% rename from .changelog/unreleased/improvements/6513-merkle-hashalternatives-perf-improv-b.md rename to .changelog/v0.38.0/improvements/6513-merkle-hashalternatives-perf-improv-b.md diff --git a/.changelog/v0.38.0/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.38.0/improvements/654-rpc-rm-response-data-logs.md new file mode 100644 index 00000000000..3fddfee8e71 --- /dev/null +++ b/.changelog/v0.38.0/improvements/654-rpc-rm-response-data-logs.md @@ -0,0 +1,3 @@ +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/7319-pubsub-query-perf-improv.md b/.changelog/v0.38.0/improvements/7319-pubsub-query-perf-improv.md similarity index 100% rename from .changelog/unreleased/improvements/7319-pubsub-query-perf-improv.md rename to .changelog/v0.38.0/improvements/7319-pubsub-query-perf-improv.md diff --git a/.changelog/v0.38.0/improvements/797-pubsub-float.md b/.changelog/v0.38.0/improvements/797-pubsub-float.md new file mode 100644 index 00000000000..c3d1a878554 --- /dev/null +++ b/.changelog/v0.38.0/improvements/797-pubsub-float.md @@ -0,0 +1,3 @@ +- `[pubsub/kvindexer]` Numeric query conditions and event values are represented as big floats with default precision of 125. + Integers are read as "big ints" and represented with as many bits as they need when converting to floats. + ([\#797](https://github.com/cometbft/cometbft/pull/797)) diff --git a/.changelog/v0.38.0/improvements/857-make-handshake-cancelable.md b/.changelog/v0.38.0/improvements/857-make-handshake-cancelable.md new file mode 100644 index 00000000000..16b447f6d23 --- /dev/null +++ b/.changelog/v0.38.0/improvements/857-make-handshake-cancelable.md @@ -0,0 +1 @@ +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) diff --git a/.changelog/v0.38.0/improvements/896-consensus-metric-duplicates.md b/.changelog/v0.38.0/improvements/896-consensus-metric-duplicates.md new file mode 100644 index 00000000000..5661da834a9 --- /dev/null +++ b/.changelog/v0.38.0/improvements/896-consensus-metric-duplicates.md @@ -0,0 +1,2 @@ +- `[consensus]` New metrics (counters) to track duplicate votes and block parts. 
+ ([\#896](https://github.com/cometbft/cometbft/pull/896)) \ No newline at end of file diff --git a/.changelog/v0.38.0/improvements/980-max-size-more-control.md b/.changelog/v0.38.0/improvements/980-max-size-more-control.md new file mode 100644 index 00000000000..e319779984c --- /dev/null +++ b/.changelog/v0.38.0/improvements/980-max-size-more-control.md @@ -0,0 +1,5 @@ +- `[mempool]` Application can now set `ConsensusParams.Block.MaxBytes` to -1 + to gain more control over the max size of transactions in a block. + It also allows the application to have visibility on all transactions in the + mempool at `PrepareProposal` time. + ([\#980](https://github.com/cometbft/cometbft/pull/980)) \ No newline at end of file diff --git a/.changelog/v0.38.0/summary.md b/.changelog/v0.38.0/summary.md new file mode 100644 index 00000000000..f34c7c5f82b --- /dev/null +++ b/.changelog/v0.38.0/summary.md @@ -0,0 +1,13 @@ +*September 12, 2023* + +This release includes the second part of ABCI++, called ABCI 2.0. +ABCI 2.0 introduces ABCI methods `ExtendVote` and `VerifyVoteExtension`. +These new methods allow the application to add data (opaque to CometBFT), +called _vote extensions_, to precommit votes sent by validators. +These vote extensions are made available to the proposer(s) of the next height. +Additionally, ABCI 2.0 coalesces `BeginBlock`, `DeliverTx`, and `EndBlock` +into one method, `FinalizeBlock`, whose `Request*` and `Response*` +data structures contain the sum of all data previously contained +in the respective `Request*` and `Response*` data structures in +`BeginBlock`, `DeliverTx`, and `EndBlock`. +See the [specification](./spec/abci/) for more details on ABCI 2.0. diff --git a/.changelog/v0.38.1/bug-fixes/1529-indexer-respect-height-params-on-query.md b/.changelog/v0.38.1/bug-fixes/1529-indexer-respect-height-params-on-query.md new file mode 100644 index 00000000000..d12f3eda536 --- /dev/null +++ b/.changelog/v0.38.1/bug-fixes/1529-indexer-respect-height-params-on-query.md @@ -0,0 +1,2 @@ +- `[state/indexer]` Respect both height params while querying for events + ([\#1529](https://github.com/cometbft/cometbft/pull/1529)) diff --git a/.changelog/v0.38.1/features/1512-metric-mempool-size-bytes.md b/.changelog/v0.38.1/features/1512-metric-mempool-size-bytes.md new file mode 100644 index 00000000000..b935dc40842 --- /dev/null +++ b/.changelog/v0.38.1/features/1512-metric-mempool-size-bytes.md @@ -0,0 +1,2 @@ +- `[metrics]` Add `SizeBytes` metric for the mempool size in bytes. + ([\#1512](https://github.com/cometbft/cometbft/pull/1512)) \ No newline at end of file diff --git a/.changelog/v0.38.1/improvements/1558-experimental-gossip-limiting.md b/.changelog/v0.38.1/improvements/1558-experimental-gossip-limiting.md new file mode 100644 index 00000000000..6931cef8274 --- /dev/null +++ b/.changelog/v0.38.1/improvements/1558-experimental-gossip-limiting.md @@ -0,0 +1,9 @@ +- `[mempool]` Add experimental feature to limit the number of persistent peers and non-persistent + peers to which the node gossips transactions. + ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) +- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and + `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to + which the node gossips transactions.
+ ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) diff --git a/.changelog/v0.38.1/summary.md b/.changelog/v0.38.1/summary.md new file mode 100644 index 00000000000..f1e5c7f755c --- /dev/null +++ b/.changelog/v0.38.1/summary.md @@ -0,0 +1,5 @@ +*November 17, 2023* + +This release contains, among other things, an opt-in, experimental feature to +help reduce the bandwidth consumption associated with the mempool's transaction +gossip. diff --git a/.changelog/v0.38.10/bug-fixes/3002-invalid-txs-results.md b/.changelog/v0.38.10/bug-fixes/3002-invalid-txs-results.md new file mode 100644 index 00000000000..67742d67ec0 --- /dev/null +++ b/.changelog/v0.38.10/bug-fixes/3002-invalid-txs-results.md @@ -0,0 +1,3 @@ +- `[rpc]` Fix an issue where a legacy ABCI response, created on `v0.37` or before, is not returned properly in `v0.38` and up + on the `/block_results` RPC endpoint. + ([\#3002](https://github.com/cometbft/cometbft/issues/3002)) diff --git a/.changelog/v0.38.10/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md b/.changelog/v0.38.10/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md new file mode 100644 index 00000000000..909e6a56039 --- /dev/null +++ b/.changelog/v0.38.10/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md @@ -0,0 +1,3 @@ +- `[blocksync]` Do not stay in blocksync if the node's validator voting power + is high enough to block the chain while it is not online + ([\#3406](https://github.com/cometbft/cometbft/pull/3406)) diff --git a/.changelog/v0.38.10/bug-fixes/486-p2p-max-outbound.md b/.changelog/v0.38.10/bug-fixes/486-p2p-max-outbound.md new file mode 100644 index 00000000000..f6507ed9671 --- /dev/null +++ b/.changelog/v0.38.10/bug-fixes/486-p2p-max-outbound.md @@ -0,0 +1,3 @@ +- `[p2p]` Node respects configured `max_num_outbound_peers` limit when dialing + peers provided by a seed node + ([\#486](https://github.com/cometbft/cometbft/issues/486)) diff --git a/.changelog/v0.38.10/improvements/3382-single-send-monitor-per-packet.md b/.changelog/v0.38.10/improvements/3382-single-send-monitor-per-packet.md new file mode 100644 index 00000000000..efa5e3cc27f --- /dev/null +++ b/.changelog/v0.38.10/improvements/3382-single-send-monitor-per-packet.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Update send monitor, used for sending rate limiting, once per batch of packets sent + ([\#3382](https://github.com/cometbft/cometbft/pull/3382)) diff --git a/.changelog/v0.38.10/improvements/3401-allow-dash-in-event-tags.md b/.changelog/v0.38.10/improvements/3401-allow-dash-in-event-tags.md new file mode 100644 index 00000000000..6de79f5e09a --- /dev/null +++ b/.changelog/v0.38.10/improvements/3401-allow-dash-in-event-tags.md @@ -0,0 +1,2 @@ +- `[libs/pubsub]` Allow dash (`-`) in event tags + ([\#3401](https://github.com/cometbft/cometbft/issues/3401)) diff --git a/.changelog/v0.38.10/improvements/3403-remove-pool-buffer-usage-in-secretconn.md b/.changelog/v0.38.10/improvements/3403-remove-pool-buffer-usage-in-secretconn.md new file mode 100644 index 00000000000..4069a79ef3b --- /dev/null +++ b/.changelog/v0.38.10/improvements/3403-remove-pool-buffer-usage-in-secretconn.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Remove the usage of a synchronous pool of buffers in secret connection, storing instead the buffer in the connection struct. This reduces the synchronization primitive usage, speeding up the code. 
+ ([\#3403](https://github.com/cometbft/cometbft/issues/3403)) diff --git a/.changelog/v0.38.10/summary.md b/.changelog/v0.38.10/summary.md new file mode 100644 index 00000000000..16f1fb18acc --- /dev/null +++ b/.changelog/v0.38.10/summary.md @@ -0,0 +1,5 @@ +*July 16, 2024* + +This release fixes a bug in `v0.38.x` that prevented ABCI responses from being +correctly read when upgrading from `v0.37.x` or below. It also includes a few other +bug fixes and performance improvements. diff --git a/.changelog/v0.38.11/bug-fixes/3565-extension-iff-enabled.md b/.changelog/v0.38.11/bug-fixes/3565-extension-iff-enabled.md new file mode 100644 index 00000000000..96f7859d35a --- /dev/null +++ b/.changelog/v0.38.11/bug-fixes/3565-extension-iff-enabled.md @@ -0,0 +1,2 @@ +- `[types]` Only check that an extension signature is present IFF the vote is a non-nil + Precommit and vote extensions are enabled ([\#3565](https://github.com/cometbft/cometbft/issues/3565)) diff --git a/.changelog/v0.38.11/improvements/3544-indexer-break-statement.md b/.changelog/v0.38.11/improvements/3544-indexer-break-statement.md new file mode 100644 index 00000000000..ef83bdd514e --- /dev/null +++ b/.changelog/v0.38.11/improvements/3544-indexer-break-statement.md @@ -0,0 +1,3 @@ +- `[indexer]` Fixed ineffective select break statements; they now + point to their enclosing for loop label to exit + ([\#3544](https://github.com/cometbft/cometbft/issues/3544)) \ No newline at end of file diff --git a/.changelog/v0.38.11/summary.md b/.changelog/v0.38.11/summary.md new file mode 100644 index 00000000000..532517f6af7 --- /dev/null +++ b/.changelog/v0.38.11/summary.md @@ -0,0 +1,7 @@ +*August 12, 2024* + +This release fixes a panic in consensus where CometBFT would previously panic +if there's no extension signature in a non-nil Precommit EVEN IF vote extensions +themselves are disabled. + +It also includes a few other bug fixes and performance improvements.
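The `\#3565` rule above is easier to see in code form. A minimal Go sketch of the invariant, using hypothetical simplified types (the real CometBFT types differ):

```go
package main

import "fmt"

// vote is a deliberately simplified, hypothetical stand-in for the real type.
type vote struct {
	isPrecommit  bool
	isNil        bool
	extSignature []byte
}

// extensionSignatureOK encodes the invariant described above: a signature is
// required IFF the vote is a non-nil Precommit AND extensions are enabled;
// with extensions disabled, a missing signature is valid rather than a panic.
func extensionSignatureOK(v vote, extensionsEnabled bool) bool {
	required := extensionsEnabled && v.isPrecommit && !v.isNil
	if required {
		return len(v.extSignature) > 0
	}
	return len(v.extSignature) == 0
}

func main() {
	v := vote{isPrecommit: true}
	fmt.Println(extensionSignatureOK(v, false)) // true: no signature needed when disabled
	fmt.Println(extensionSignatureOK(v, true))  // false: signature required but missing
}
```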
diff --git a/.changelog/v0.38.12/bug-fixes/0016-abc-light-proposer-priorities.md b/.changelog/v0.38.12/bug-fixes/0016-abc-light-proposer-priorities.md new file mode 100644 index 00000000000..6915b51db30 --- /dev/null +++ b/.changelog/v0.38.12/bug-fixes/0016-abc-light-proposer-priorities.md @@ -0,0 +1,2 @@ +- `[light]` Cross-check proposer priorities in retrieved validator sets + ([\#ASA-2024-009](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) diff --git a/.changelog/v0.38.12/bug-fixes/3828-privval-drop-duplicate-listen.md b/.changelog/v0.38.12/bug-fixes/3828-privval-drop-duplicate-listen.md new file mode 100644 index 00000000000..3c3ad1f4b66 --- /dev/null +++ b/.changelog/v0.38.12/bug-fixes/3828-privval-drop-duplicate-listen.md @@ -0,0 +1 @@ +- `[privval]` Ignore duplicate privval listen when already connected ([\#3828](https://github.com/cometbft/cometbft/issues/3828)) diff --git a/.changelog/v0.38.12/dependencies/3728-update-btcec-v2.md b/.changelog/v0.38.12/dependencies/3728-update-btcec-v2.md new file mode 100644 index 00000000000..361592b2c9c --- /dev/null +++ b/.changelog/v0.38.12/dependencies/3728-update-btcec-v2.md @@ -0,0 +1,4 @@ +- `[crypto/secp256k1]` Adjust to breaking interface changes in the latest + `btcec/v2` release, while avoiding breaking changes to + local CometBFT functions + ([\#3728](https://github.com/cometbft/cometbft/pull/3728)) diff --git a/.changelog/v0.38.12/improvements/0016-abc-types-validator-set.md b/.changelog/v0.38.12/improvements/0016-abc-types-validator-set.md new file mode 100644 index 00000000000..b8eb2d6579e --- /dev/null +++ b/.changelog/v0.38.12/improvements/0016-abc-types-validator-set.md @@ -0,0 +1,2 @@ +- `[types]` Check that the proposer is one of the validators in `ValidateBasic` + ([\#ASA-2024-009](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) diff --git a/.changelog/v0.38.12/improvements/3819-e2e-log-level.md b/.changelog/v0.38.12/improvements/3819-e2e-log-level.md new file mode 100644 index 00000000000..b0726fb319e --- /dev/null +++ b/.changelog/v0.38.12/improvements/3819-e2e-log-level.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `log_level` option to the manifest file + ([#3819](https://github.com/cometbft/cometbft/pull/3819)). diff --git a/.changelog/v0.38.12/improvements/3836-e2e-log-format.md b/.changelog/v0.38.12/improvements/3836-e2e-log-format.md new file mode 100644 index 00000000000..6e1e50a8c4d --- /dev/null +++ b/.changelog/v0.38.12/improvements/3836-e2e-log-format.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `log_format` option to the manifest file + ([#3836](https://github.com/cometbft/cometbft/issues/3836)). diff --git a/.changelog/v0.38.12/summary.md b/.changelog/v0.38.12/summary.md new file mode 100644 index 00000000000..0ed31926ac8 --- /dev/null +++ b/.changelog/v0.38.12/summary.md @@ -0,0 +1,4 @@ +*September 3, 2024* + +This release includes a security fix for the light client and is recommended +for all users.
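The `[types]` entry above adds a membership check to `ValidateBasic`. The shape of that check, as a hedged Go sketch with hypothetical minimal types rather than CometBFT's actual implementation:

```go
package main

import (
	"bytes"
	"fmt"
)

// validator is a hypothetical, minimal stand-in for the real type.
type validator struct{ address []byte }

// checkProposerInSet mirrors the ASA-2024-009 hardening noted above:
// the proposer must be a member of the validator set.
func checkProposerInSet(proposer validator, vals []validator) error {
	for _, v := range vals {
		if bytes.Equal(v.address, proposer.address) {
			return nil
		}
	}
	return fmt.Errorf("proposer address %X not found in the validator set", proposer.address)
}

func main() {
	vals := []validator{{address: []byte{0x01}}, {address: []byte{0x02}}}
	fmt.Println(checkProposerInSet(validator{address: []byte{0x03}}, vals))
}
```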
diff --git a/.changelog/v0.38.2/bug-fixes/1654-semaphore-wait.md b/.changelog/v0.38.2/bug-fixes/1654-semaphore-wait.md new file mode 100644 index 00000000000..9d0fb80adcc --- /dev/null +++ b/.changelog/v0.38.2/bug-fixes/1654-semaphore-wait.md @@ -0,0 +1,3 @@ +- `[mempool]` Avoid infinite wait in transaction sending routine when + using experimental parameters to limit transaction gossiping to peers + ([\#1654](https://github.com/cometbft/cometbft/pull/1654)) \ No newline at end of file diff --git a/.changelog/v0.38.2/features/1643-nop-mempool.md b/.changelog/v0.38.2/features/1643-nop-mempool.md new file mode 100644 index 00000000000..e12ec43fc1a --- /dev/null +++ b/.changelog/v0.38.2/features/1643-nop-mempool.md @@ -0,0 +1,17 @@ +- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643)) + + If you want to use it, change mempool's `type` to `nop`: + + ```toml + [mempool] + + # The type of mempool for this node to use. + # + # Possible types: + # - "flood" : concurrent linked list mempool with flooding gossip protocol + # (default) + # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible + # for storing, disseminating and proposing txs). "create_empty_blocks=false" + # is not supported. + type = "nop" + ``` \ No newline at end of file diff --git a/.changelog/v0.38.2/summary.md b/.changelog/v0.38.2/summary.md new file mode 100644 index 00000000000..97d902edcc7 --- /dev/null +++ b/.changelog/v0.38.2/summary.md @@ -0,0 +1,6 @@ +*November 27, 2023* + +This release provides the **nop** mempool for applications that want to build their own mempool. +Using this mempool effectively disables all mempool functionality in CometBFT, including transaction dissemination and the `broadcast_tx_*` endpoints. + +It also fixes a small bug in the mempool for an experimental feature.
diff --git a/.changelog/v0.38.3/bug-fixes/0000-asa-2024-001-fix-validate.md b/.changelog/v0.38.3/bug-fixes/0000-asa-2024-001-fix-validate.md new file mode 100644 index 00000000000..35340fe71cf --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/0000-asa-2024-001-fix-validate.md @@ -0,0 +1,2 @@ +- `[consensus]` Fix for "Validation of `VoteExtensionsEnableHeight` can cause chain halt" + ([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4)) diff --git a/.changelog/v0.38.3/bug-fixes/1687-consensus-fix-block-validation.md b/.changelog/v0.38.3/bug-fixes/1687-consensus-fix-block-validation.md new file mode 100644 index 00000000000..778f0b538b4 --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/1687-consensus-fix-block-validation.md @@ -0,0 +1,3 @@ +- `[mempool]` Make the tx size calculation returned by calling proxyapp consistent with that of the mempool + ([\#1687](https://github.com/cometbft/cometbft/pull/1687)) + diff --git a/.changelog/v0.38.3/bug-fixes/1749-light-client-attack-verify-all-sigs.md b/.changelog/v0.38.3/bug-fixes/1749-light-client-attack-verify-all-sigs.md new file mode 100644 index 00000000000..1115c4d195a --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/1749-light-client-attack-verify-all-sigs.md @@ -0,0 +1,4 @@ +- `[evidence]` When `VerifyCommitLight` & `VerifyCommitLightTrusting` are called as part + of evidence verification, all signatures present in the evidence must be verified + ([\#1749](https://github.com/cometbft/cometbft/pull/1749)) + diff --git a/.changelog/v0.38.3/bug-fixes/1825-false-on-nil-key.md b/.changelog/v0.38.3/bug-fixes/1825-false-on-nil-key.md new file mode 100644 index 00000000000..dcd466a39e7 --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/1825-false-on-nil-key.md @@ -0,0 +1,3 @@ +- `[crypto]` `SupportsBatchVerifier` returns false + if the public key is nil instead of dereferencing nil. + ([\#1825](https://github.com/cometbft/cometbft/pull/1825)) \ No newline at end of file diff --git a/.changelog/v0.38.3/bug-fixes/1879-blocksync-wait-for-pool-routine.md b/.changelog/v0.38.3/bug-fixes/1879-blocksync-wait-for-pool-routine.md new file mode 100644 index 00000000000..e4122368287 --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/1879-blocksync-wait-for-pool-routine.md @@ -0,0 +1,2 @@ +- `[blocksync]` Wait for `poolRoutine` to stop in `(*Reactor).OnStop` + ([\#1879](https://github.com/cometbft/cometbft/pull/1879)) diff --git a/.changelog/v0.38.3/bug-fixes/4019-mempool-metric-rejected-txs.md b/.changelog/v0.38.3/bug-fixes/4019-mempool-metric-rejected-txs.md new file mode 100644 index 00000000000..aba8c6e663c --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/4019-mempool-metric-rejected-txs.md @@ -0,0 +1,2 @@ +- `[metrics]` Record the previously unused `rejected_txs` metric in the mempool + ([\#4019](https://github.com/cometbft/cometbft/pull/4019)) diff --git a/.changelog/v0.38.3/bug-fixes/642-clist-mempool-data-races.md b/.changelog/v0.38.3/bug-fixes/642-clist-mempool-data-races.md new file mode 100644 index 00000000000..037bbc9550f --- /dev/null +++ b/.changelog/v0.38.3/bug-fixes/642-clist-mempool-data-races.md @@ -0,0 +1,2 @@ +- `[mempool]` Fix data races in `CListMempool` by making atomic the types of `height`, `txsBytes`, and + `notifiedTxsAvailable`.
([\#642](https://github.com/cometbft/cometbft/pull/642)) diff --git a/.changelog/v0.38.3/improvements/1715-validate-validator-address.md b/.changelog/v0.38.3/improvements/1715-validate-validator-address.md new file mode 100644 index 00000000000..ec7f2c7da6a --- /dev/null +++ b/.changelog/v0.38.3/improvements/1715-validate-validator-address.md @@ -0,0 +1 @@ +- `[types]` Validate `Validator#Address` in `ValidateBasic` ([\#1715](https://github.com/cometbft/cometbft/pull/1715)) diff --git a/.changelog/v0.38.3/improvements/1730-increase-abci-socket-message-size-limit.md b/.changelog/v0.38.3/improvements/1730-increase-abci-socket-message-size-limit.md new file mode 100644 index 00000000000..5246eb57f08 --- /dev/null +++ b/.changelog/v0.38.3/improvements/1730-increase-abci-socket-message-size-limit.md @@ -0,0 +1 @@ +- `[abci]` Increase ABCI socket message size limit to 2GB ([\#1730](https://github.com/cometbft/cometbft/pull/1730): @troykessler) diff --git a/.changelog/v0.38.3/improvements/1735-batch-save-state.md b/.changelog/v0.38.3/improvements/1735-batch-save-state.md new file mode 100644 index 00000000000..721380f6041 --- /dev/null +++ b/.changelog/v0.38.3/improvements/1735-batch-save-state.md @@ -0,0 +1 @@ +- `[state]` Save the state using a single DB batch ([\#1735](https://github.com/cometbft/cometbft/pull/1735)) diff --git a/.changelog/v0.38.3/improvements/1755-batch-save-block.md b/.changelog/v0.38.3/improvements/1755-batch-save-block.md new file mode 100644 index 00000000000..22f15cdb423 --- /dev/null +++ b/.changelog/v0.38.3/improvements/1755-batch-save-block.md @@ -0,0 +1,2 @@ +- `[store]` Save the block using a single DB batch if the block is smaller than 640kB; otherwise, each block part is saved individually + ([\#1755](https://github.com/cometbft/cometbft/pull/1755)) diff --git a/.changelog/v0.38.3/improvements/1900-httpproxy-from-env.md b/.changelog/v0.38.3/improvements/1900-httpproxy-from-env.md new file mode 100644 index 00000000000..fd654ef7ba0 --- /dev/null +++ b/.changelog/v0.38.3/improvements/1900-httpproxy-from-env.md @@ -0,0 +1,2 @@ +- `[rpc]` Support setting the proxy for `DefaultHttpClient` from environment variables. + ([\#1900](https://github.com/cometbft/cometbft/pull/1900)) diff --git a/.changelog/v0.38.3/improvements/1902-rpc-default-port.md b/.changelog/v0.38.3/improvements/1902-rpc-default-port.md new file mode 100644 index 00000000000..b321bed5394 --- /dev/null +++ b/.changelog/v0.38.3/improvements/1902-rpc-default-port.md @@ -0,0 +1 @@ +- `[rpc]` Use the default port for HTTP(S) URLs when there is no explicit port ([\#1903](https://github.com/cometbft/cometbft/pull/1903)) diff --git a/.changelog/v0.38.3/improvements/1921-crypto-merkle-innerHash.md b/.changelog/v0.38.3/improvements/1921-crypto-merkle-innerHash.md new file mode 100644 index 00000000000..d3c9dac2cba --- /dev/null +++ b/.changelog/v0.38.3/improvements/1921-crypto-merkle-innerHash.md @@ -0,0 +1 @@ +- `[crypto/merkle]` Faster calculation of hashes ([#1921](https://github.com/cometbft/cometbft/pull/1921)) diff --git a/.changelog/v0.38.3/summary.md b/.changelog/v0.38.3/summary.md new file mode 100644 index 00000000000..4282c85ad1d --- /dev/null +++ b/.changelog/v0.38.3/summary.md @@ -0,0 +1,8 @@ +*January 17, 2024* +
+This release addresses a high-impact security issue reported in advisory +([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4)). +There are other non-security bug fixes that have been addressed since +`v0.38.2` was released, as well as some improvements.
+Please check the list below for further details. + diff --git a/.changelog/v0.38.4/improvements/2065-e2e-vote-ext-activation.md b/.changelog/v0.38.4/improvements/2065-e2e-vote-ext-activation.md new file mode 100644 index 00000000000..9ced3a5da72 --- /dev/null +++ b/.changelog/v0.38.4/improvements/2065-e2e-vote-ext-activation.md @@ -0,0 +1,5 @@ +- `[e2e]` Add manifest option `VoteExtensionsUpdateHeight` to test + vote extension activation via `InitChain` and `FinalizeBlock`. + Also, extend the manifest generator to produce different values + of this new option + ([\#2065](https://github.com/cometbft/cometbft/pull/2065)) diff --git a/.changelog/v0.38.4/summary.md b/.changelog/v0.38.4/summary.md new file mode 100644 index 00000000000..0a8b339c9e8 --- /dev/null +++ b/.changelog/v0.38.4/summary.md @@ -0,0 +1,8 @@ +*January 22, 2024* + +This release is aimed at those projects that have a dependency on CometBFT, +release line `v0.38.x`, and make use of the function `SaveBlockStoreState` in package +`github.com/cometbft/cometbft/store`. This function changed its signature in `v0.38.3`. +This new release reverts the signature change so that upgrading to the latest release +of CometBFT on `v0.38.x` does not require any change in the code depending on CometBFT. + diff --git a/.changelog/v0.38.5/improvements/2093-metric-chain-size-bytes.md b/.changelog/v0.38.5/improvements/2093-metric-chain-size-bytes.md new file mode 100644 index 00000000000..afba958e3b7 --- /dev/null +++ b/.changelog/v0.38.5/improvements/2093-metric-chain-size-bytes.md @@ -0,0 +1,2 @@ +- `[consensus]` Add `chain_size_bytes` metric for measuring the size of the blockchain in bytes + ([\#2093](https://github.com/cometbft/cometbft/pull/2093)) diff --git a/.changelog/v0.38.5/summary.md b/.changelog/v0.38.5/summary.md new file mode 100644 index 00000000000..61a0a3d29cc --- /dev/null +++ b/.changelog/v0.38.5/summary.md @@ -0,0 +1,10 @@ +*January 24, 2024* + +This release fixes a problem introduced in `v0.38.3`: if an application +updates the value of ConsensusParam `VoteExtensionsEnableHeight` to the same value +(actually a "noop" update), this is accepted in `v0.38.2` but rejected under some +conditions in `v0.38.3` and `v0.38.4`. Even if rejecting a useless update would make sense +in general, in a point release we should not reject a set of inputs to +a function that was previously accepted (unless there is a good reason +for it). The goal of this release is to again accept all "noop" updates, like `v0.38.2` did.
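The accepted-inputs rule the v0.38.5 summary describes can be stated as a small predicate. A hedged Go sketch with a hypothetical function name (not CometBFT's actual validation code); the only grounded point is that a "noop" update must always pass:

```go
package main

import "fmt"

// validateEnableHeightUpdate is a hypothetical sketch of the rule above:
// updating VoteExtensionsEnableHeight to its current value ("noop") is
// accepted unconditionally, as in v0.38.2; only a genuine change is
// subject to further validation.
func validateEnableHeightUpdate(current, proposed, chainHeight int64) error {
	if proposed == current {
		return nil // noop update: always accept
	}
	// Illustrative check for genuine changes; the real conditions differ.
	if proposed <= chainHeight {
		return fmt.Errorf("VoteExtensionsEnableHeight %d must be in the future (height %d)", proposed, chainHeight)
	}
	return nil
}

func main() {
	fmt.Println(validateEnableHeightUpdate(100, 100, 150)) // <nil>: noop accepted even for a past height
}
```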
+ diff --git a/.changelog/v0.38.6/bug-fixes/2047-privval-retry-accepting-conn.md b/.changelog/v0.38.6/bug-fixes/2047-privval-retry-accepting-conn.md new file mode 100644 index 00000000000..45260721c8b --- /dev/null +++ b/.changelog/v0.38.6/bug-fixes/2047-privval-retry-accepting-conn.md @@ -0,0 +1 @@ +- `[privval]` Retry accepting a connection ([\#2047](https://github.com/cometbft/cometbft/pull/2047)) diff --git a/.changelog/v0.38.6/bug-fixes/2136-fix-state-rollback.md b/.changelog/v0.38.6/bug-fixes/2136-fix-state-rollback.md new file mode 100644 index 00000000000..55cbfe51ed4 --- /dev/null +++ b/.changelog/v0.38.6/bug-fixes/2136-fix-state-rollback.md @@ -0,0 +1,2 @@ +- `[state]` Fix rollback to a specific height + ([\#2136](https://github.com/cometbft/cometbft/pull/2136)) diff --git a/.changelog/v0.38.6/features/2362-e2e-block-max-bytes.md b/.changelog/v0.38.6/features/2362-e2e-block-max-bytes.md new file mode 100644 index 00000000000..a3b007c3f96 --- /dev/null +++ b/.changelog/v0.38.6/features/2362-e2e-block-max-bytes.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `block_max_bytes` option to the manifest file. + ([\#2362](https://github.com/cometbft/cometbft/pull/2362)) \ No newline at end of file diff --git a/.changelog/v0.38.6/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md b/.changelog/v0.38.6/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md new file mode 100644 index 00000000000..7251221be18 --- /dev/null +++ b/.changelog/v0.38.6/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md @@ -0,0 +1,2 @@ +- `[blocksync]` Avoid double-calling `types.BlockFromProto` for performance + reasons ([\#2016](https://github.com/cometbft/cometbft/pull/2016)) diff --git a/.changelog/v0.38.6/improvements/2094-e2e-load-max-txs.md b/.changelog/v0.38.6/improvements/2094-e2e-load-max-txs.md new file mode 100644 index 00000000000..31ca79cfe3b --- /dev/null +++ b/.changelog/v0.38.6/improvements/2094-e2e-load-max-txs.md @@ -0,0 +1,2 @@ +- `[e2e]` Add manifest option `load_max_txs` to limit the number of transactions generated by the + `load` command. ([\#2094](https://github.com/cometbft/cometbft/pull/2094)) diff --git a/.changelog/v0.38.6/improvements/2434-jsonrpc-websocket-basic-auth.md b/.changelog/v0.38.6/improvements/2434-jsonrpc-websocket-basic-auth.md new file mode 100644 index 00000000000..e4db7c06c7d --- /dev/null +++ b/.changelog/v0.38.6/improvements/2434-jsonrpc-websocket-basic-auth.md @@ -0,0 +1 @@ +- `[jsonrpc]` enable HTTP basic auth in websocket client ([#2434](https://github.com/cometbft/cometbft/pull/2434)) diff --git a/.changelog/v0.38.6/improvements/2467-decrease-n-of-requested-blocks.md b/.changelog/v0.38.6/improvements/2467-decrease-n-of-requested-blocks.md new file mode 100644 index 00000000000..3b5ea17ce5a --- /dev/null +++ b/.changelog/v0.38.6/improvements/2467-decrease-n-of-requested-blocks.md @@ -0,0 +1,3 @@ +- `[blocksync]` make the max number of downloaded blocks dynamic. + Previously it was a const 600. 
Now it's `peersCount * maxPendingRequestsPerPeer (20)` + [\#2467](https://github.com/cometbft/cometbft/pull/2467) diff --git a/.changelog/v0.38.6/improvements/2475-blocksync-2nd-request.md b/.changelog/v0.38.6/improvements/2475-blocksync-2nd-request.md new file mode 100644 index 00000000000..67614a8e35f --- /dev/null +++ b/.changelog/v0.38.6/improvements/2475-blocksync-2nd-request.md @@ -0,0 +1,3 @@ +- `[blocksync]` Request a block from peer B if we are approaching pool's height + (less than 50 blocks) and the current peer A is slow in sending us the + block [\#2475](https://github.com/cometbft/cometbft/pull/2475) diff --git a/.changelog/v0.38.6/improvements/2475-blocksync-no-block-response.md b/.changelog/v0.38.6/improvements/2475-blocksync-no-block-response.md new file mode 100644 index 00000000000..d01b3679866 --- /dev/null +++ b/.changelog/v0.38.6/improvements/2475-blocksync-no-block-response.md @@ -0,0 +1,3 @@ +- `[blocksync]` Request the block N from peer B immediately after getting + `NoBlockResponse` from peer A + [\#2475](https://github.com/cometbft/cometbft/pull/2475) diff --git a/.changelog/v0.38.6/improvements/2475-blocksync-sort-peers.md b/.changelog/v0.38.6/improvements/2475-blocksync-sort-peers.md new file mode 100644 index 00000000000..5c544401ba6 --- /dev/null +++ b/.changelog/v0.38.6/improvements/2475-blocksync-sort-peers.md @@ -0,0 +1,2 @@ +- `[blocksync]` Sort peers by download rate (the fastest peer is picked first) + [\#2475](https://github.com/cometbft/cometbft/pull/2475) diff --git a/.changelog/v0.38.6/summary.md b/.changelog/v0.38.6/summary.md new file mode 100644 index 00000000000..3e31f8c58c3 --- /dev/null +++ b/.changelog/v0.38.6/summary.md @@ -0,0 +1,5 @@ +*March 12, 2024* + +This release fixes a security bug in the light client. It also introduces many +improvements to the block sync in collaboration with the +[Osmosis](https://osmosis.zone/) team. 
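The dynamic download cap from the `\#2467` blocksync entry above reduces to a one-line formula. A minimal Go sketch (hypothetical helper; only the constant 20 and the old cap of 600 are taken from the entry):

```go
package main

import "fmt"

// maxPendingRequestsPerPeer uses the value quoted in the entry (20).
const maxPendingRequestsPerPeer = 20

// maxPendingBlocks shows the new sizing rule: the cap on concurrently
// downloaded blocks scales with the peer count instead of a fixed 600.
func maxPendingBlocks(peersCount int) int {
	return peersCount * maxPendingRequestsPerPeer
}

func main() {
	fmt.Println(maxPendingBlocks(5))  // 100
	fmt.Println(maxPendingBlocks(40)) // 800: may exceed the old fixed cap of 600
}
```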
diff --git a/.changelog/v0.38.7/bug-fixes/2225-fix-checktx-request-returns-error.md b/.changelog/v0.38.7/bug-fixes/2225-fix-checktx-request-returns-error.md new file mode 100644 index 00000000000..cb0da42b54d --- /dev/null +++ b/.changelog/v0.38.7/bug-fixes/2225-fix-checktx-request-returns-error.md @@ -0,0 +1,2 @@ +- [`mempool`] Panic when a CheckTx request to the app returns an error + ([\#2225](https://github.com/cometbft/cometbft/pull/2225)) diff --git a/.changelog/v0.38.7/bug-fixes/2774-bitarray-unmarshal-json.md b/.changelog/v0.38.7/bug-fixes/2774-bitarray-unmarshal-json.md new file mode 100644 index 00000000000..1c51af49d26 --- /dev/null +++ b/.changelog/v0.38.7/bug-fixes/2774-bitarray-unmarshal-json.md @@ -0,0 +1,2 @@ +- [`bits`] Prevent `BitArray.UnmarshalJSON` from crashing on 0 bits + ([\#2774](https://github.com/cometbft/cometbft/pull/2774)) diff --git a/.changelog/v0.38.7/features/2793-boostrap.md b/.changelog/v0.38.7/features/2793-boostrap.md new file mode 100644 index 00000000000..407bac77e0b --- /dev/null +++ b/.changelog/v0.38.7/features/2793-boostrap.md @@ -0,0 +1,2 @@ +- [`node`] Add `BootstrapStateWithGenProvider` to bootstrap state using a custom + genesis doc provider ([\#2793](https://github.com/cometbft/cometbft/pull/2793)) diff --git a/.changelog/v0.38.7/improvements/2839-tx_index-lower-heap-allocation.md b/.changelog/v0.38.7/improvements/2839-tx_index-lower-heap-allocation.md new file mode 100644 index 00000000000..2c763654201 --- /dev/null +++ b/.changelog/v0.38.7/improvements/2839-tx_index-lower-heap-allocation.md @@ -0,0 +1,2 @@ +- `[state/indexer]` Lower the heap allocation of transaction searches + ([\#2839](https://github.com/cometbft/cometbft/pull/2839)) \ No newline at end of file diff --git a/.changelog/v0.38.7/improvements/2841-speedup-bits-pick-random.md b/.changelog/v0.38.7/improvements/2841-speedup-bits-pick-random.md new file mode 100644 index 00000000000..b7103be2629 --- /dev/null +++ b/.changelog/v0.38.7/improvements/2841-speedup-bits-pick-random.md @@ -0,0 +1,2 @@ +- `[internal/bits]` 10x speedup and remove heap overhead of bitArray.PickRandom (used extensively in consensus gossip) + ([\#2841](https://github.com/cometbft/cometbft/pull/2841)). diff --git a/.changelog/v0.38.7/improvements/2846-speedup-json-encoding.md b/.changelog/v0.38.7/improvements/2846-speedup-json-encoding.md new file mode 100644 index 00000000000..026b5a43989 --- /dev/null +++ b/.changelog/v0.38.7/improvements/2846-speedup-json-encoding.md @@ -0,0 +1,2 @@ +- `[libs/json]` Lower the memory overhead of JSON encoding by using JSON encoders internally + ([\#2846](https://github.com/cometbft/cometbft/pull/2846)). diff --git a/.changelog/v0.38.7/summary.md b/.changelog/v0.38.7/summary.md new file mode 100644 index 00000000000..6a84208dd26 --- /dev/null +++ b/.changelog/v0.38.7/summary.md @@ -0,0 +1,3 @@ +*April 26, 2024* + +This release contains a few bug fixes and performance improvements. diff --git a/.changelog/v0.38.8/breaking-changes/3314-mempool-preupdate.md b/.changelog/v0.38.8/breaking-changes/3314-mempool-preupdate.md new file mode 100644 index 00000000000..4c2528939f7 --- /dev/null +++ b/.changelog/v0.38.8/breaking-changes/3314-mempool-preupdate.md @@ -0,0 +1,4 @@ +- `[mempool]` Add to the `Mempool` interface a new method `PreUpdate()`. This method should be + called before acquiring the mempool lock, to signal that a new update is coming. Also add to + `ErrMempoolIsFull` a new field `RecheckFull`.
+ ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) diff --git a/.changelog/v0.38.8/bug-fixes/14-abc.md b/.changelog/v0.38.8/bug-fixes/14-abc.md new file mode 100644 index 00000000000..8d9d211609b --- /dev/null +++ b/.changelog/v0.38.8/bug-fixes/14-abc.md @@ -0,0 +1,4 @@ +- `[blockstore]` Added peer banning in blockstore + ([\#ABC-0013](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) +- `[blockstore]` Send correct error message when vote extensions do not align with received packet + ([\#ABC-0014](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) diff --git a/.changelog/v0.38.8/bug-fixes/1827-fix-recheck-async.md b/.changelog/v0.38.8/bug-fixes/1827-fix-recheck-async.md new file mode 100644 index 00000000000..40abe23c1af --- /dev/null +++ b/.changelog/v0.38.8/bug-fixes/1827-fix-recheck-async.md @@ -0,0 +1,2 @@ +- [`mempool`] Fix data race when rechecking with async ABCI client + ([\#1827](https://github.com/cometbft/cometbft/issues/1827)) diff --git a/.changelog/v0.38.8/bug-fixes/3092-consensus-timeout-ticker-data-race.md b/.changelog/v0.38.8/bug-fixes/3092-consensus-timeout-ticker-data-race.md new file mode 100644 index 00000000000..b27dddc4f02 --- /dev/null +++ b/.changelog/v0.38.8/bug-fixes/3092-consensus-timeout-ticker-data-race.md @@ -0,0 +1,2 @@ +- `[consensus]` Fix a race condition in the consensus timeout ticker. The race is caused by two timeouts being scheduled at the same time. + ([\#3092](https://github.com/cometbft/cometbft/pull/2136)) diff --git a/.changelog/v0.38.8/bug-fixes/3195-batch-verification.md b/.changelog/v0.38.8/bug-fixes/3195-batch-verification.md new file mode 100644 index 00000000000..2e4104af394 --- /dev/null +++ b/.changelog/v0.38.8/bug-fixes/3195-batch-verification.md @@ -0,0 +1,2 @@ +- `[types]` Do not batch verify a commit if the validator set keys have different + types. ([\#3195](https://github.com/cometbft/cometbft/issues/3195)) diff --git a/.changelog/v0.38.8/improvements/1827-config-mempool-recheck-timeout.md b/.changelog/v0.38.8/improvements/1827-config-mempool-recheck-timeout.md new file mode 100644 index 00000000000..28bbb8a1d37 --- /dev/null +++ b/.changelog/v0.38.8/improvements/1827-config-mempool-recheck-timeout.md @@ -0,0 +1,3 @@ +- `[config]` Added `recheck_timeout` mempool parameter to set how much time to wait for recheck + responses from the app (only applies to non-local ABCI clients). + ([\#1827](https://github.com/cometbft/cometbft/issues/1827/)) diff --git a/.changelog/v0.38.8/improvements/2867-rpc-batch-size-config.md b/.changelog/v0.38.8/improvements/2867-rpc-batch-size-config.md new file mode 100644 index 00000000000..d353bed0474 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2867-rpc-batch-size-config.md @@ -0,0 +1,2 @@ +- `[rpc]` Add a configurable maximum batch size for RPC requests. + ([\#2867](https://github.com/cometbft/cometbft/pull/2867)). diff --git a/.changelog/v0.38.8/improvements/2911-remove-event-bus-debug-logs.md b/.changelog/v0.38.8/improvements/2911-remove-event-bus-debug-logs.md new file mode 100644 index 00000000000..a008e2482a0 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2911-remove-event-bus-debug-logs.md @@ -0,0 +1,2 @@ +- `[event-bus]` Remove the debug logs in PublishEventTx, which were causing noticeable production slowdowns.
+ ([\#2911](https://github.com/cometbft/cometbft/pull/2911)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2924-consensus-cache-block-hash.md b/.changelog/v0.38.8/improvements/2924-consensus-cache-block-hash.md new file mode 100644 index 00000000000..4f9e5638c45 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2924-consensus-cache-block-hash.md @@ -0,0 +1,2 @@ +- `[state/execution]` Cache the block hash computation inside of the Block Type, so we only compute it once. + ([\#2924](https://github.com/cometbft/cometbft/pull/2924)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md b/.changelog/v0.38.8/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md new file mode 100644 index 00000000000..ac975738453 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md @@ -0,0 +1,2 @@ +- `[consensus/state]` Remove a redundant `VerifyBlock` call in `FinalizeCommit` + ([\#2928](https://github.com/cometbft/cometbft/pull/2928)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2949-reduce-protoio-writer-creation-time.md b/.changelog/v0.38.8/improvements/2949-reduce-protoio-writer-creation-time.md new file mode 100644 index 00000000000..75838e24882 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2949-reduce-protoio-writer-creation-time.md @@ -0,0 +1,2 @@ +- `[p2p/channel]` Speedup `ProtoIO` writer creation time, and thereby speedup channel writing by 5%. + ([\#2949](https://github.com/cometbft/cometbft/pull/2949)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2952-lower-next-packet-msg-time.md b/.changelog/v0.38.8/improvements/2952-lower-next-packet-msg-time.md new file mode 100644 index 00000000000..6a05588c0f6 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2952-lower-next-packet-msg-time.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Minor speedup (3%) to connection.WritePacketMsgTo, by removing MinInt calls. + ([\#2952](https://github.com/cometbft/cometbft/pull/2952)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2959-speedup-initialized-bitarray-construction.md b/.changelog/v0.38.8/improvements/2959-speedup-initialized-bitarray-construction.md new file mode 100644 index 00000000000..7c1b2181d08 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2959-speedup-initialized-bitarray-construction.md @@ -0,0 +1,2 @@ +- `[internal/bits]` 10x speedup creating initialized bitArrays, which speeds up extendedCommit.BitArray(). This is used in consensus vote gossip. + ([\#2959](https://github.com/cometbft/cometbft/pull/2841)). diff --git a/.changelog/v0.38.8/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md b/.changelog/v0.38.8/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md new file mode 100644 index 00000000000..26fdb6c7ed2 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md @@ -0,0 +1,2 @@ +- `[blockstore]` Remove a redundant `Header.ValidateBasic` call in `LoadBlockMeta`, reducing this time by 75%.
+ ([\#2964](https://github.com/cometbft/cometbft/pull/2964)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/2986-lower-memory-allocation-in-packet-writing.md b/.changelog/v0.38.8/improvements/2986-lower-memory-allocation-in-packet-writing.md new file mode 100644 index 00000000000..9d262798808 --- /dev/null +++ b/.changelog/v0.38.8/improvements/2986-lower-memory-allocation-in-packet-writing.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Speedup connection.WritePacketMsgTo, by reusing internal buffers rather than re-allocating. + ([\#2986](https://github.com/cometbft/cometbft/pull/2986)) \ No newline at end of file diff --git a/.changelog/v0.38.8/improvements/3003-use-lru-caches-in-blockstore.md b/.changelog/v0.38.8/improvements/3003-use-lru-caches-in-blockstore.md new file mode 100644 index 00000000000..14d4cf2abfb --- /dev/null +++ b/.changelog/v0.38.8/improvements/3003-use-lru-caches-in-blockstore.md @@ -0,0 +1,2 @@ +- [`blockstore`] Use LRU caches in blockstore, significantly improving consensus gossip routine performance + ([\#3003](https://github.com/cometbft/cometbft/issues/3003)) diff --git a/.changelog/v0.38.8/improvements/3017-speedup-consensus-metrics.md b/.changelog/v0.38.8/improvements/3017-speedup-consensus-metrics.md new file mode 100644 index 00000000000..2d468990abf --- /dev/null +++ b/.changelog/v0.38.8/improvements/3017-speedup-consensus-metrics.md @@ -0,0 +1,2 @@ +- [`consensus`] Improve performance of consensus metrics by lowering string operations + ([\#3017](https://github.com/cometbft/cometbft/issues/3017)) diff --git a/.changelog/v0.38.8/improvements/3019-reduce-allocations-in-packet-reads.md b/.changelog/v0.38.8/improvements/3019-reduce-allocations-in-packet-reads.md new file mode 100644 index 00000000000..604002636a9 --- /dev/null +++ b/.changelog/v0.38.8/improvements/3019-reduce-allocations-in-packet-reads.md @@ -0,0 +1,3 @@ +- [`protoio`] Remove one allocation and new object call from `ReadMsg`, + leading to a 4% p2p message reading performance gain. + ([\#3018](https://github.com/cometbft/cometbft/issues/3018)) diff --git a/.changelog/v0.38.8/improvements/3314-mempool-update-consider-full-when-rechecking.md b/.changelog/v0.38.8/improvements/3314-mempool-update-consider-full-when-rechecking.md new file mode 100644 index 00000000000..1e308ec6040 --- /dev/null +++ b/.changelog/v0.38.8/improvements/3314-mempool-update-consider-full-when-rechecking.md @@ -0,0 +1,3 @@ +- `[mempool]` Before updating the mempool, consider it as full if rechecking is still in progress. + This will stop accepting transactions in the mempool if the node can't keep up with re-CheckTx.
+ ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) diff --git a/.changelog/v0.38.8/improvements/4019-mempool-metric-evicted-txs.md b/.changelog/v0.38.8/improvements/4019-mempool-metric-evicted-txs.md new file mode 100644 index 00000000000..420cbf2c5a8 --- /dev/null +++ b/.changelog/v0.38.8/improvements/4019-mempool-metric-evicted-txs.md @@ -0,0 +1,2 @@ +- `[metrics]` Add `evicted_txs` metric to mempool + ([\#4019](https://github.com/cometbft/cometbft/pull/4019)) diff --git a/.changelog/v0.38.8/improvements/4123-mempool-is-full-log.md b/.changelog/v0.38.8/improvements/4123-mempool-is-full-log.md new file mode 100644 index 00000000000..68f187e658d --- /dev/null +++ b/.changelog/v0.38.8/improvements/4123-mempool-is-full-log.md @@ -0,0 +1,2 @@ +- `[log]` Change "mempool is full" log to debug level + ([\#4123](https://github.com/cometbft/cometbft/pull/4123)) diff --git a/.changelog/v0.38.8/summary.md b/.changelog/v0.38.8/summary.md new file mode 100644 index 00000000000..9f857767e35 --- /dev/null +++ b/.changelog/v0.38.8/summary.md @@ -0,0 +1,3 @@ +*June 27, 2024* + +This release contains a few bug fixes and performance improvements. diff --git a/.changelog/v0.38.9/breaking-changes/3361-mempool-preupdate.md b/.changelog/v0.38.9/breaking-changes/3361-mempool-preupdate.md new file mode 100644 index 00000000000..eb63d3b7e92 --- /dev/null +++ b/.changelog/v0.38.9/breaking-changes/3361-mempool-preupdate.md @@ -0,0 +1,4 @@ +- `[mempool]` Revert adding the method `PreUpdate()` to the `Mempool` interface, recently introduced + in the previous patch release (v0.38.8). Its logic is now moved into the `Lock` method. With this change, + the `Mempool` interface is the same as in v0.38.7. + ([\#3361](https://github.com/cometbft/cometbft/pull/3361)) diff --git a/.changelog/v0.38.9/bug-fixes/3352-nil-pointer-tx-search.md b/.changelog/v0.38.9/bug-fixes/3352-nil-pointer-tx-search.md new file mode 100644 index 00000000000..1d568e52b79 --- /dev/null +++ b/.changelog/v0.38.9/bug-fixes/3352-nil-pointer-tx-search.md @@ -0,0 +1,2 @@ +- `[rpc]` Fix nil pointer error in `/tx` and `/tx_search` when block is + absent ([\#3352](https://github.com/cometbft/cometbft/issues/3352)) diff --git a/.changelog/v0.38.9/summary.md b/.changelog/v0.38.9/summary.md new file mode 100644 index 00000000000..705ae83e568 --- /dev/null +++ b/.changelog/v0.38.9/summary.md @@ -0,0 +1,5 @@ +*July 1, 2024* + +This release reverts the API-breaking change to the Mempool interface introduced in the last patch +release (v0.38.8) while still keeping the performance improvement added to the mempool. It also +includes a minor fix to the RPC endpoints /tx and /tx_search. 
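To make the `PreUpdate()` change and its v0.38.9 revert concrete, here is a minimal Go sketch of the pattern, assuming a hypothetical `sketchMempool` type; this is not CometBFT's actual mempool implementation, only an illustration of the pre-update signal being folded into `Lock`.

```go
package main

import (
	"fmt"
	"sync"
)

// sketchMempool is an illustrative type, not CometBFT's mempool.
type sketchMempool struct {
	mtx           sync.Mutex
	recheckCutoff bool
}

// Lock sketches the v0.38.9 shape: the "an update is coming" signal,
// which v0.38.8 briefly exposed as a separate PreUpdate() method,
// now runs inside Lock itself, restoring the v0.38.7 interface.
func (m *sketchMempool) Lock() {
	m.preUpdate() // in v0.38.8 this was the exported PreUpdate()
	m.mtx.Lock()
}

func (m *sketchMempool) Unlock() { m.mtx.Unlock() }

// preUpdate stands in for whatever bookkeeping must happen before an
// update, e.g. flagging in-flight recheck responses as stale.
func (m *sketchMempool) preUpdate() {
	m.recheckCutoff = true
}

func main() {
	m := &sketchMempool{}
	m.Lock()
	fmt.Println("mempool locked; safe to run Update(...)")
	m.Unlock()
}
```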
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 579466cdcb7..3af96c5554f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,13 +1,12 @@ name: Build # Tests runs different tests (test_abci_apps, test_abci_cli, test_apps) -# This workflow runs on every push to main or release branch and every pull requests +# This workflow runs on every push to v0.38.x and every pull request # All jobs will pass without running if no *{.go, .mod, .sum} files have been modified on: pull_request: push: branches: - - main - - release/** + - v0.38.x jobs: build: @@ -20,10 +19,10 @@ jobs: goos: ["linux"] timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.22" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -42,10 +41,10 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.22" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -64,10 +63,10 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.22" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 45bec88585a..a340cb179e1 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -7,32 +7,33 @@ name: Check generated code on: pull_request: branches: - - main + - v0.38.x permissions: contents: read jobs: - check-mocks: + check-mocks-metrics: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.22" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - name: "Check generated mocks" + - name: "Check generated mocks and metrics" run: | set -euo pipefail - make mockery + make mockery metrics - if ! git diff --stat --exit-code ; then + git add . + if ! git diff HEAD --stat --exit-code ; then echo ">> ERROR:" echo ">>" - echo ">> Generated mocks require update (either Mockery or source files may have changed)." - echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR." + echo ">> Generated mocks and/or metrics require update (either Mockery or source files may have changed)." + echo ">> Ensure your tools are up-to-date, re-run 'make mockery metrics' and update this PR." echo ">>" git diff exit 1 @@ -41,11 +42,11 @@ jobs: check-proto: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.22" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 # we need a .git directory to run git diff @@ -55,7 +56,8 @@ jobs: make proto-gen - if ! git diff --stat --exit-code ; then + git add . + if ! git diff HEAD --stat --exit-code ; then echo ">> ERROR:" echo ">>" echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)." 
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index 8319c6745b2..00000000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,76 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - workflow_dispatch: - push: - branches: ["main"] - pull_request: - # The branches below must be a subset of the branches above - branches: ["main"] - - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: ['go'] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Use only 'java' to analyze code written in Java, Kotlin or both - # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both - # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - - # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - - - # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
- - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 - with: - category: "/language:${{matrix.language}}" diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index f55725d8b3d..db488ad0b7c 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -1,10 +1,10 @@ name: Docker -# Build & Push rebuilds the CometBFT docker image on every push to main and creation of tags +# Rebuilds the CometBFT docker image on every push to v0.38.x and creation of tags # and pushes the image to https://hub.docker.com/r/cometbft/cometbft on: push: branches: - - main + - v0.38.x tags: - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 @@ -15,7 +15,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.7.1 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v6.9.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/docs-toc.yml b/.github/workflows/docs-toc.yml index 589be02c68a..46ac63856aa 100644 --- a/.github/workflows/docs-toc.yml +++ b/.github/workflows/docs-toc.yml @@ -4,13 +4,13 @@ on: pull_request: push: branches: - - main + - v0.38.x jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/e2e-long-main.yml b/.github/workflows/e2e-long-main.yml deleted file mode 100644 index 302e29d1ecb..00000000000 --- a/.github/workflows/e2e-long-main.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Weekly run of the E2E testnet using the long-running manifest on main - -# !! Relevant changes to this file should be propagated to the e2e-nightly-x -# files for the supported backport branches, when appropriate, modulo version -# markers. - -name: e2e-long-main -on: - workflow_dispatch: - schedule: - - cron: '0 3 * * 4' - -jobs: - e2e-long-test: - runs-on: ubuntu-latest - timeout-minutes: 120 - steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.20' - - - uses: actions/checkout@v3 - - - name: Build - working-directory: test/e2e - # Run make jobs in parallel, since we can't run steps in parallel. 
- run: make -j2 docker runner - - - name: Run testnet - working-directory: test/e2e - run: ./run-multiple.sh networks/long.toml - - e2e-long-fail: - needs: e2e-long-test - if: ${{ failure() }} - runs-on: ubuntu-latest - steps: - - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - BRANCH: ${{ github.ref_name }} - RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}" - with: - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Weekly long-run E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } diff --git a/.github/workflows/e2e-manual-multiversion.yml b/.github/workflows/e2e-manual-multiversion.yml index 164ee3b9e21..cdd97b63aa4 100644 --- a/.github/workflows/e2e-manual-multiversion.yml +++ b/.github/workflows/e2e-manual-multiversion.yml @@ -11,15 +11,15 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', '04'] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e @@ -27,12 +27,19 @@ jobs: run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above # Generate multi-version tests with double the quantity of E2E nodes # based on the current branch as compared to the latest version. 
run: ./build/generator -g 5 -m "latest:1,local:2" -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index d69cdf892c3..c769f2b8ac6 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -11,15 +11,15 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', '04'] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e @@ -27,10 +27,17 @@ jobs: run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above run: ./build/generator -g 5 -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml deleted file mode 100644 index bffbf66676f..00000000000 --- a/.github/workflows/e2e-nightly-34x.yml +++ /dev/null @@ -1,77 +0,0 @@ -# Runs randomly generated E2E testnets nightly on the 0.34.x branch. - -# !! This file should be kept in sync with the e2e-nightly-main.yml file, -# modulo changes to the version labels. - -name: e2e-nightly-34x -on: - schedule: - - cron: '0 2 * * *' - -jobs: - e2e-nightly-test: - # Run parallel jobs for the listed testnet groups (must match the - # ./build/generator -g flag) - strategy: - fail-fast: false - matrix: - group: ['00', '01'] - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.18' - - - uses: actions/checkout@v3 - with: - ref: 'v0.34.x' - - - name: Capture git repo info - id: git-info - run: | - echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT - - - name: Build - working-directory: test/e2e - # Run make jobs in parallel, since we can't run steps in parallel. 
- run: make -j2 docker generator runner - - - name: Generate testnets - working-directory: test/e2e - # When changing -g, also change the matrix groups above - run: ./build/generator -g 2 -d networks/nightly -p - - - name: Run testnets in group ${{ matrix.group }} - working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml - - outputs: - git-branch: ${{ steps.git-info.outputs.branch }} - - e2e-nightly-fail: - needs: e2e-nightly-test - if: ${{ failure() }} - runs-on: ubuntu-latest - steps: - - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} - RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" - with: - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml deleted file mode 100644 index 26614fe4b42..00000000000 --- a/.github/workflows/e2e-nightly-37x.yml +++ /dev/null @@ -1,77 +0,0 @@ -# Runs randomly generated E2E testnets nightly on the v0.37.x branch. - -# !! This file should be kept in sync with the e2e-nightly-main.yml file, -# modulo changes to the version labels. - -name: e2e-nightly-37x -on: - schedule: - - cron: '0 2 * * *' - -jobs: - e2e-nightly-test: - # Run parallel jobs for the listed testnet groups (must match the - # ./build/generator -g flag) - strategy: - fail-fast: false - matrix: - group: ['00', '01', '02', '03', "04"] - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.20' - - - uses: actions/checkout@v3 - with: - ref: 'v0.37.x' - - - name: Capture git repo info - id: git-info - run: | - echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT - - - name: Build - working-directory: test/e2e - # Run make jobs in parallel, since we can't run steps in parallel. 
- run: make -j2 docker generator runner tests - - - name: Generate testnets - working-directory: test/e2e - # When changing -g, also change the matrix groups above - run: ./build/generator -g 5 -d networks/nightly/ -p - - - name: Run ${{ matrix.p2p }} p2p testnets - working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml - - outputs: - git-branch: ${{ steps.git-info.outputs.branch }} - - e2e-nightly-fail: - needs: e2e-nightly-test - if: ${{ failure() }} - runs-on: ubuntu-latest - steps: - - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} - RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" - with: - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml deleted file mode 100644 index b031f78fd1d..00000000000 --- a/.github/workflows/e2e-nightly-main.yml +++ /dev/null @@ -1,68 +0,0 @@ -# Runs randomly generated E2E testnets nightly on main - -# !! Relevant changes to this file should be propagated to the e2e-nightly-x -# files for the supported backport branches, when appropriate, modulo version -# markers. - -name: e2e-nightly-main -on: - schedule: - - cron: '0 2 * * *' - -jobs: - e2e-nightly-test: - # Run parallel jobs for the listed testnet groups (must match the - # ./build/generator -g flag) - strategy: - fail-fast: false - matrix: - group: ['00', '01', '02', '03', "04"] - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.20' - - - uses: actions/checkout@v3 - - - name: Build - working-directory: test/e2e - # Run make jobs in parallel, since we can't run steps in parallel. - run: make -j2 docker generator runner tests - - - name: Generate testnets - working-directory: test/e2e - # When changing -g, also change the matrix groups above - run: ./build/generator -g 5 -d networks/nightly/ -p - - - name: Run ${{ matrix.p2p }} p2p testnets - working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml - - e2e-nightly-fail: - needs: e2e-nightly-test - if: ${{ failure() }} - runs-on: ubuntu-latest - steps: - - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - BRANCH: ${{ github.ref_name }} - RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}" - with: - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
- } - } - ] - } diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index e6a4a7e730f..ef5a8060c1f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -1,23 +1,22 @@ name: e2e -# Runs the CI end-to-end test network on all pushes to main or release branches +# Runs the CI end-to-end test network on all pushes to v0.38.x # and every pull request, but only if any Go files have been changed. on: workflow_dispatch: # allow running workflow manually pull_request: push: branches: - - main - - release/** + - v0.38.x jobs: e2e-test: runs-on: ubuntu-latest timeout-minutes: 15 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' - - uses: actions/checkout@v3 + go-version: '1.22' + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 15a20f06407..ea453ddb90c 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -5,7 +5,8 @@ on: schedule: - cron: '0 3 * * *' pull_request: - branches: [main] + branches: + - v0.38.x paths: - "test/fuzz/**/*.go" @@ -13,11 +14,11 @@ jobs: fuzz-nightly-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install go-fuzz working-directory: test/fuzz @@ -49,14 +50,14 @@ jobs: continue-on-error: true - name: Archive crashers - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: crashers path: test/fuzz/**/crashers retention-days: 3 - name: Archive suppressions - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: suppressions path: test/fuzz/**/suppressions @@ -76,7 +77,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.27.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index f7e1e10818a..f0c2fbd652e 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -8,17 +8,17 @@ on: pull_request: push: branches: - - main - - release/** + - v0.38.x jobs: govulncheck: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20.2" - - uses: actions/checkout@v3 + go-version: "1.22" + check-latest: true + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index 22cba4a93af..29ad2ceb54b 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.11.0 + - uses: styfle/cancel-workflow-action@0.12.1 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b3271d53895..6386487af34 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,7 +1,7 @@ name: Golang Linter # Lint runs golangci-lint over the entire CometBFT repository. # -# This workflow is run on every pull request and push to main. +# This workflow is run on every pull request and push to v0.38.x. 
# # The `golangci` job will pass without running if no *.{go, mod, sum} # files have been modified. @@ -12,26 +12,26 @@ on: pull_request: push: branches: - - main + - v0.38.x jobs: golangci: name: golangci-lint runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go go.mod go.sum - - uses: golangci/golangci-lint-action@v3 + - uses: golangci/golangci-lint-action@v6 with: - version: v1.51 + version: latest args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml index 0caa6c679ac..7a381fe453e 100644 --- a/.github/workflows/markdown-linter.yml +++ b/.github/workflows/markdown-linter.yml @@ -2,13 +2,14 @@ name: Markdown Linter on: push: branches: - - main + - v0.38.x paths: - "**.md" - "**.yml" - "**.yaml" pull_request: - branches: [main] + branches: + - v0.38.x paths: - "**.md" - "**.yml" @@ -19,12 +20,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Lint Code Base uses: docker://github/super-linter:v4 env: VALIDATE_ALL_CODEBASE: true - DEFAULT_BRANCH: main + DEFAULT_BRANCH: v0.38.x GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VALIDATE_MD: true VALIDATE_OPENAPI: true diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index bbd28cbd067..47869f21191 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -12,13 +12,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' # Similar check to ./release-version.yml, but enforces this when pushing # tags. The ./release-version.yml check can be bypassed and is mainly @@ -44,7 +44,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." 
> ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon pre-release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.27.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 55f5feff79a..376bd6f9dce 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -5,7 +5,7 @@ on: - 'proto/**' push: branches: - - main + - v0.38.x paths: - 'proto/**' @@ -14,8 +14,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 5 steps: - - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.15.1 + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.45.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index 773e9339ab3..ec27668d4e2 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -11,11 +11,11 @@ jobs: check-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' - name: Check version run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e1c959c3f32..06a0e38255d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,13 +10,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.22' # Similar check to ./release-version.yml, but enforces this when pushing # tags. The ./release-version.yml check can be bypassed and is mainly @@ -43,7 +43,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for this release." 
> ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.27.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 396f41b1aba..35bfb53d77f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index d503a9868f7..5736f6a74ae 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -1,10 +1,10 @@ name: Docker E2E Node -# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags +# Rebuilds the e2e Testapp docker image on every push to v0.38.x and creation of tags # and pushes the image to https://hub.docker.com/r/cometbft/e2e-node on: push: branches: - - main + - v0.38.x tags: - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 @@ -15,7 +15,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.7.1 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v6.9.0 with: context: . 
file: ./test/e2e/docker/Dockerfile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 69801355d55..84742390fce 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,8 +5,7 @@ on: paths: - "**.go" branches: - - main - - release/** + - v0.38.x jobs: tests: @@ -16,10 +15,10 @@ jobs: matrix: part: ["00", "01", "02", "03", "04", "05"] steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.22" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.gitignore b/.gitignore index d9247f281a0..b3eb33aefb6 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,4 @@ proto/spec/**/*.pb.go *.dvi # Python virtual environments .venv +go.work.sum diff --git a/.golangci.yml b/.golangci.yml index 80e7214b2c0..24116994dae 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,12 +6,11 @@ linters: - dogsled - dupl - errcheck - - exportloopref + # - copyloopvar - goconst - gofmt - goimports - - revive - - gosec + # - gosec - gosimple - govet - ineffassign @@ -38,7 +37,62 @@ linters-settings: max-blank-identifiers: 3 golint: min-confidence: 0 + goconst: + ignore-tests: true maligned: suggest-new: true misspell: locale: US + depguard: + rules: + main: + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/cometbft + - github.com/cosmos + - github.com/btcsuite/btcd/btcec/v2 + - github.com/BurntSushi/toml + - github.com/go-git/go-git/v5 + - github.com/go-kit + - github.com/go-logfmt/logfmt + - github.com/gofrs/uuid + - github.com/google + - github.com/gorilla/websocket + - github.com/informalsystems/tm-load-test/pkg/loadtest + - github.com/hashicorp/golang-lru/v2 + - github.com/lib/pq + - github.com/libp2p/go-buffer-pool + - github.com/Masterminds/semver/v3 + - github.com/minio/highwayhash + - github.com/oasisprotocol/curve25519-voi + - github.com/pkg/errors + - github.com/prometheus + - github.com/rcrowley/go-metrics + - github.com/rs/cors + - github.com/snikch/goodman + - github.com/spf13 + - github.com/stretchr/testify/require + - github.com/syndtr/goleveldb + test: + files: + - "$test" + allow: + - $gostd + - github.com/cosmos + - github.com/cometbft + - github.com/adlio/schema + - github.com/btcsuite/btcd + - github.com/fortytw2/leaktest + - github.com/go-kit + - github.com/google/uuid + - github.com/gorilla/websocket + - github.com/lib/pq + - github.com/oasisprotocol/curve25519-voi/primitives/merlin + - github.com/ory/dockertest + - github.com/pkg/errors + - github.com/prometheus/client_golang/prometheus/promhttp + - github.com/spf13 + - github.com/stretchr/testify diff --git a/CHANGELOG.md b/CHANGELOG.md index fd6c3aab928..e620b6586ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,528 @@ # CHANGELOG +## v0.38.12 + +*September 3, 2024* + +This release includes a security fix for the light client and is recommended +for all users. 
+ +### BUG FIXES + +- `[light]` Cross-check proposer priorities in retrieved validator sets + ([\#ASA-2024-009](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) +- `[privval]` Ignore duplicate privval listen when already connected ([\#3828](https://github.com/cometbft/cometbft/issues/3828)) + +### DEPENDENCIES + +- `[crypto/secp256k1]` Adjust to breaking interface changes in + `btcec/v2` latest release, while avoiding breaking changes to + local CometBFT functions + ([\#3728](https://github.com/cometbft/cometbft/pull/3728)) + +### IMPROVEMENTS + +- `[types]` Check that proposer is one of the validators in `ValidateBasic` + ([\#ASA-2024-009](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) +- `[e2e]` Add `log_level` option to manifest file + ([#3819](https://github.com/cometbft/cometbft/pull/3819)). +- `[e2e]` Add `log_format` option to manifest file + ([#3836](https://github.com/cometbft/cometbft/issues/3836)). + +## v0.38.11 + +*August 12, 2024* + +This release fixes a panic in consensus: CometBFT would previously panic +if there is no extension signature in a non-nil Precommit, EVEN IF vote extensions +themselves are disabled. + +It also includes a few other bug fixes and performance improvements. + +### BUG FIXES + +- `[types]` Only check for an extension signature IFF the vote is a non-nil Precommit + and vote extensions are enabled ([\#3565](https://github.com/cometbft/cometbft/issues/3565)) + +### IMPROVEMENTS + +- `[indexer]` Fixed ineffective `select` break statements; they now + point to their enclosing `for` loop label to exit + ([\#3544](https://github.com/cometbft/cometbft/issues/3544)) + +## v0.38.10 + +*July 16, 2024* + +This release fixes a bug in `v0.38.x` that prevented ABCI responses from being +correctly read when upgrading from `v0.37.x` or below. It also includes a few other +bug fixes and performance improvements. + +### BUG FIXES + +- `[p2p]` Node respects configured `max_num_outbound_peers` limit when dialing + peers provided by a seed node + ([\#486](https://github.com/cometbft/cometbft/issues/486)) +- `[rpc]` Fix an issue where a legacy ABCI response, created on `v0.37` or before, is not returned properly in `v0.38` and up + on the `/block_results` RPC endpoint. + ([\#3002](https://github.com/cometbft/cometbft/issues/3002)) +- `[blocksync]` Do not stay in blocksync if the node's validator voting power + is high enough to block the chain while it is not online + ([\#3406](https://github.com/cometbft/cometbft/pull/3406)) + +### IMPROVEMENTS + +- `[p2p/conn]` Update send monitor, used for sending rate limiting, once per batch of packets sent + ([\#3382](https://github.com/cometbft/cometbft/pull/3382)) +- `[libs/pubsub]` Allow dash (`-`) in event tags + ([\#3401](https://github.com/cometbft/cometbft/issues/3401)) +- `[p2p/conn]` Remove the usage of a synchronous pool of buffers in secret connection, instead storing the buffer in the connection struct. This reduces the synchronization primitive usage, speeding up the code. + ([\#3403](https://github.com/cometbft/cometbft/issues/3403)) + +## v0.38.9 + +*July 1, 2024* + +This release reverts the API-breaking change to the Mempool interface introduced in the last patch +release (v0.38.8) while still keeping the performance improvement added to the mempool. It also +includes a minor fix to the RPC endpoints /tx and /tx_search. + +### BREAKING CHANGES + +- `[mempool]` Revert adding the method `PreUpdate()` to the `Mempool` interface, recently introduced + in the previous patch release (v0.38.8).
Its logic is now moved into the `Lock` method. With this change, + the `Mempool` interface is the same as in v0.38.7. + ([\#3361](https://github.com/cometbft/cometbft/pull/3361)) + +### BUG FIXES + +- `[rpc]` Fix nil pointer error in `/tx` and `/tx_search` when block is + absent ([\#3352](https://github.com/cometbft/cometbft/issues/3352)) + +## v0.38.8 + +*June 27, 2024* + +This release contains a few bug fixes and performance improvements. + +### BREAKING CHANGES + +- `[mempool]` Add to the `Mempool` interface a new method `PreUpdate()`. This method should be + called before acquiring the mempool lock, to signal that a new update is coming. Also add to + `ErrMempoolIsFull` a new field `RecheckFull`. + ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) + +### BUG FIXES + +- `[blockstore]` Added peer banning in blockstore + ([\#ABC-0013](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) +- `[blockstore]` Send correct error message when vote extensions do not align with received packet + ([\#ABC-0014](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) +- [`mempool`] Fix data race when rechecking with async ABCI client + ([\#1827](https://github.com/cometbft/cometbft/issues/1827)) +- `[consensus]` Fix a race condition in the consensus timeout ticker. The race is caused by two timeouts being scheduled at the same time. + ([\#3092](https://github.com/cometbft/cometbft/pull/2136)) +- `[types]` Do not batch verify a commit if the validator set keys have different + types. ([\#3195](https://github.com/cometbft/cometbft/issues/3195)) + +### IMPROVEMENTS + +- `[config]` Added `recheck_timeout` mempool parameter to set how much time to wait for recheck + responses from the app (only applies to non-local ABCI clients). + ([\#1827](https://github.com/cometbft/cometbft/issues/1827/)) +- `[rpc]` Add a configurable maximum batch size for RPC requests. + ([\#2867](https://github.com/cometbft/cometbft/pull/2867)). +- `[event-bus]` Remove the debug logs in PublishEventTx, which were causing noticeable production slowdowns. + ([\#2911](https://github.com/cometbft/cometbft/pull/2911)) +- `[state/execution]` Cache the block hash computation inside of the Block Type, so we only compute it once. + ([\#2924](https://github.com/cometbft/cometbft/pull/2924)) +- `[consensus/state]` Remove a redundant `VerifyBlock` call in `FinalizeCommit` + ([\#2928](https://github.com/cometbft/cometbft/pull/2928)) +- `[p2p/channel]` Speedup `ProtoIO` writer creation time, and thereby speedup channel writing by 5%. + ([\#2949](https://github.com/cometbft/cometbft/pull/2949)) +- `[p2p/conn]` Minor speedup (3%) to connection.WritePacketMsgTo, by removing MinInt calls. + ([\#2952](https://github.com/cometbft/cometbft/pull/2952)) +- `[internal/bits]` 10x speedup creating initialized bitArrays, which speeds up extendedCommit.BitArray(). This is used in consensus vote gossip. + ([\#2959](https://github.com/cometbft/cometbft/pull/2841)). +- `[blockstore]` Remove a redundant `Header.ValidateBasic` call in `LoadBlockMeta`, reducing this time by 75%. + ([\#2964](https://github.com/cometbft/cometbft/pull/2964)) +- `[p2p/conn]` Speedup connection.WritePacketMsgTo, by reusing internal buffers rather than re-allocating.
+ ([\#2986](https://github.com/cometbft/cometbft/pull/2986)) +- [`blockstore`] Use LRU caches in blockstore, significantly improving consensus gossip routine performance + ([\#3003](https://github.com/cometbft/cometbft/issues/3003)) +- [`consensus`] Improve performance of consensus metrics by lowering string operations + ([\#3017](https://github.com/cometbft/cometbft/issues/3017)) +- [`protoio`] Remove one allocation and new object call from `ReadMsg`, + leading to a 4% p2p message reading performance gain. + ([\#3018](https://github.com/cometbft/cometbft/issues/3018)) +- `[mempool]` Before updating the mempool, consider it as full if rechecking is still in progress. + This will stop accepting transactions in the mempool if the node can't keep up with re-CheckTx. + ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) + +## v0.38.7 + +*April 26, 2024* + +This release contains a few bug fixes and performance improvements. + +### BUG FIXES + +- [`mempool`] Panic when a CheckTx request to the app returns an error + ([\#2225](https://github.com/cometbft/cometbft/pull/2225)) +- [`bits`] Prevent `BitArray.UnmarshalJSON` from crashing on 0 bits + ([\#2774](https://github.com/cometbft/cometbft/pull/2774)) + +### FEATURES + +- [`node`] Add `BootstrapStateWithGenProvider` to bootstrap state using a custom + genesis doc provider ([\#2793](https://github.com/cometbft/cometbft/pull/2793)) + +### IMPROVEMENTS + +- `[state/indexer]` Lower the heap allocation of transaction searches + ([\#2839](https://github.com/cometbft/cometbft/pull/2839)) +- `[internal/bits]` 10x speedup and remove heap overhead of bitArray.PickRandom (used extensively in consensus gossip) + ([\#2841](https://github.com/cometbft/cometbft/pull/2841)). +- `[libs/json]` Lower the memory overhead of JSON encoding by using JSON encoders internally + ([\#2846](https://github.com/cometbft/cometbft/pull/2846)). + +## v0.38.6 + +*March 12, 2024* + +This release fixes a security bug in the light client. It also introduces many +improvements to the block sync in collaboration with the +[Osmosis](https://osmosis.zone/) team. + +### BUG FIXES + +- `[privval]` Retry accepting a connection ([\#2047](https://github.com/cometbft/cometbft/pull/2047)) +- `[state]` Fix rollback to a specific height + ([\#2136](https://github.com/cometbft/cometbft/pull/2136)) + +### FEATURES + +- `[e2e]` Add `block_max_bytes` option to the manifest file. + ([\#2362](https://github.com/cometbft/cometbft/pull/2362)) + +### IMPROVEMENTS + +- `[blocksync]` Avoid double-calling `types.BlockFromProto` for performance + reasons ([\#2016](https://github.com/cometbft/cometbft/pull/2016)) +- `[e2e]` Add manifest option `load_max_txs` to limit the number of transactions generated by the + `load` command. ([\#2094](https://github.com/cometbft/cometbft/pull/2094)) +- `[jsonrpc]` enable HTTP basic auth in websocket client ([#2434](https://github.com/cometbft/cometbft/pull/2434)) +- `[blocksync]` make the max number of downloaded blocks dynamic. + Previously it was a const 600.
Now it's `peersCount * maxPendingRequestsPerPeer (20)` + [\#2467](https://github.com/cometbft/cometbft/pull/2467) +- `[blocksync]` Request a block from peer B if we are approaching pool's height + (less than 50 blocks) and the current peer A is slow in sending us the + block [\#2475](https://github.com/cometbft/cometbft/pull/2475) +- `[blocksync]` Request the block N from peer B immediately after getting + `NoBlockResponse` from peer A + [\#2475](https://github.com/cometbft/cometbft/pull/2475) +- `[blocksync]` Sort peers by download rate (the fastest peer is picked first) + [\#2475](https://github.com/cometbft/cometbft/pull/2475) + +## v0.38.5 + +*January 24, 2024* + +This release fixes a problem introduced in `v0.38.3`: if an application +updates the value of ConsensusParam `VoteExtensionsEnableHeight` to the same value +(actually a "noop" update), this is accepted in `v0.38.2` but rejected under some +conditions in `v0.38.3` and `v0.38.4`. Even if rejecting a useless update would make sense +in general, in a point release we should not reject a set of inputs to +a function that was previously accepted (unless there is a good reason +for it). The goal of this release is to accept again all "noop" updates, like `v0.38.2` did. + +### IMPROVEMENTS + +- `[consensus]` Add `chain_size_bytes` metric for measuring the size of the blockchain in bytes + ([\#2093](https://github.com/cometbft/cometbft/pull/2093)) + +## v0.38.4 + +*January 22, 2024* + +This release is aimed at those projects that have a dependency on CometBFT, +release line `v0.38.x`, and make use of function `SaveBlockStoreState` in package +`github.com/cometbft/cometbft/store`. This function changed its signature in `v0.38.3`. +This new release reverts the signature change so that upgrading to the latest release +of CometBFT on `v0.38.x` does not require any change in the code depending on CometBFT. + +### IMPROVEMENTS + +- `[e2e]` Add manifest option `VoteExtensionsUpdateHeight` to test + vote extension activation via `InitChain` and `FinalizeBlock`. + Also, extend the manifest generator to produce different values + of this new option + ([\#2065](https://github.com/cometbft/cometbft/pull/2065)) + +## v0.38.3 + +*January 17, 2024* + +This release addresses a high-impact security issue reported in advisory +([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4)). +There are other non-security bug fixes that have been addressed since +`v0.38.2` was released, as well as some improvements. +Please check the list below for further details. + +### BUG FIXES + +- `[consensus]` Fix for "Validation of `VoteExtensionsEnableHeight` can cause chain halt" + ([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4)) +- `[mempool]` Fix data races in `CListMempool` by making atomic the types of `height`, `txsBytes`, and + `notifiedTxsAvailable`. ([\#642](https://github.com/cometbft/cometbft/pull/642)) +- `[mempool]` The method for calculating the tx size returned by calling proxyapp should be consistent with that of the mempool + ([\#1687](https://github.com/cometbft/cometbft/pull/1687)) +- `[evidence]` When `VerifyCommitLight` & `VerifyCommitLightTrusting` are called as part + of evidence verification, all signatures present in the evidence must be verified + ([\#1749](https://github.com/cometbft/cometbft/pull/1749)) +- `[crypto]` `SupportsBatchVerifier` returns false + if public key is nil instead of dereferencing nil.
+  ([\#1825](https://github.com/cometbft/cometbft/pull/1825))
+- `[blocksync]` Wait for `poolRoutine` to stop in `(*Reactor).OnStop`
+  ([\#1879](https://github.com/cometbft/cometbft/pull/1879))
+
+### IMPROVEMENTS
+
+- `[types]` Validate `Validator#Address` in `ValidateBasic` ([\#1715](https://github.com/cometbft/cometbft/pull/1715))
+- `[abci]` Increase ABCI socket message size limit to 2GB ([\#1730](https://github.com/cometbft/cometbft/pull/1730): @troykessler)
+- `[state]` Save the state using a single DB batch ([\#1735](https://github.com/cometbft/cometbft/pull/1735))
+- `[store]` Save a block using a single DB batch if the block is less than 640kB; otherwise, each block part is saved individually
+  ([\#1755](https://github.com/cometbft/cometbft/pull/1755))
+- `[rpc]` Support setting a proxy from the environment in `DefaultHttpClient`.
+  ([\#1900](https://github.com/cometbft/cometbft/pull/1900))
+- `[rpc]` Use the default port for HTTP(S) URLs when there is no explicit port ([\#1903](https://github.com/cometbft/cometbft/pull/1903))
+- `[crypto/merkle]` Faster calculation of hashes ([\#1921](https://github.com/cometbft/cometbft/pull/1921))
+
+## v0.38.2
+
+*November 27, 2023*
+
+This release provides the **nop** mempool for applications that want to build their own mempool.
+Using this mempool effectively disables all mempool functionality in CometBFT, including transaction dissemination and the `broadcast_tx_*` endpoints.
+
+It also fixes a small bug in the mempool for an experimental feature.
+
+### BUG FIXES
+
+- `[mempool]` Avoid an infinite wait in the transaction sending routine when
+  using experimental parameters to limit transaction gossiping to peers
+  ([\#1654](https://github.com/cometbft/cometbft/pull/1654))
+
+### FEATURES
+
+- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643))
+
+  If you want to use it, change mempool's `type` to `nop`:
+
+  ```toml
+  [mempool]
+
+  # The type of mempool for this node to use.
+  #
+  # Possible types:
+  # - "flood" : concurrent linked list mempool with flooding gossip protocol
+  #   (default)
+  # - "nop"   : nop-mempool (short for no operation; the ABCI app is responsible
+  #   for storing, disseminating and proposing txs). "create_empty_blocks=false"
+  #   is not supported.
+  type = "nop"
+  ```
+
+## v0.38.1
+
+*November 17, 2023*
+
+This release contains, among other things, an opt-in, experimental feature to
+help reduce the bandwidth consumption associated with the mempool's transaction
+gossip.
+
+### BUG FIXES
+
+- `[state/indexer]` Respect both height params while querying for events
+  ([\#1529](https://github.com/cometbft/cometbft/pull/1529))
+
+### FEATURES
+
+- `[metrics]` Add metric for mempool size in bytes `SizeBytes`.
+  ([\#1512](https://github.com/cometbft/cometbft/pull/1512))
+
+### IMPROVEMENTS
+
+- `[mempool]` Add experimental feature to limit the number of persistent and non-persistent
+  peers to which the node gossips transactions.
+  ([\#1558](https://github.com/cometbft/cometbft/pull/1558))
+  ([\#1584](https://github.com/cometbft/cometbft/pull/1584))
+- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and
+  `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to
+  which the node gossips transactions.
+  ([\#1558](https://github.com/cometbft/cometbft/pull/1558))
+  ([\#1584](https://github.com/cometbft/cometbft/pull/1584))
+
+## v0.38.0
+
+*September 12, 2023*
+
+This release includes the second part of ABCI++, called ABCI 2.0.
+ABCI 2.0 introduces ABCI methods `ExtendVote` and `VerifyVoteExtension`.
+These new methods allow the application to add data (opaque to CometBFT),
+called _vote extensions_, to precommit votes sent by validators.
+These vote extensions are made available to the proposer(s) of the next height.
+Additionally, ABCI 2.0 coalesces `BeginBlock`, `DeliverTx`, and `EndBlock`
+into one method, `FinalizeBlock`, whose `Request*` and `Response*`
+data structures contain the sum of all data previously contained
+in the respective `Request*` and `Response*` data structures in
+`BeginBlock`, `DeliverTx`, and `EndBlock`.
+See the [specification](./spec/abci/) for more details on ABCI 2.0,
+and the sketch below for the shape of the new vote-extension methods.
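+To illustrate the shape of the two new methods, here is a minimal,
+hypothetical sketch: the `App` type, the extension payload, and the
+`maxExtensionSize` limit are illustrative and not part of this release.
+
+```go
+package app
+
+import (
+	"context"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+)
+
+// maxExtensionSize is a hypothetical, application-defined limit.
+const maxExtensionSize = 1024
+
+// App embeds BaseApplication to inherit no-op defaults for the other
+// ABCI methods.
+type App struct {
+	abci.BaseApplication
+}
+
+// ExtendVote attaches application-defined bytes (opaque to CometBFT) to
+// this validator's precommit vote.
+func (app *App) ExtendVote(_ context.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
+	return &abci.ResponseExtendVote{VoteExtension: []byte("my-extension")}, nil
+}
+
+// VerifyVoteExtension rejects extensions this application does not
+// accept; here, anything larger than maxExtensionSize.
+func (app *App) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+	if len(req.VoteExtension) > maxExtensionSize {
+		return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil
+	}
+	return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil
+}
+```
+
+A real application would derive the extension from its own state and apply
+its own verification rules.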
+
+### BREAKING CHANGES
+
+- `[mempool]` Remove priority mempool.
+  ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[config]` Remove `Version` field from `MempoolConfig`.
+  ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[protobuf]` Remove fields `sender`, `priority`, and `mempool_error` from
+  `ResponseCheckTx`. ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root).
+  `Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic
+  ([\#558](https://github.com/cometbft/cometbft/issues/558))
+- `[state/kvindexer]` Remove the function type from the event key stored in the database. This should be breaking only
+  for people who forked CometBFT and interact directly with the indexer's kvstore.
+  ([\#774](https://github.com/cometbft/cometbft/pull/774))
+- `[rpc]` Removed `begin_block_events` and `end_block_events` from `BlockResultsResponse`.
+  The events are merged into one field called `finalize_block_events`.
+  ([\#9427](https://github.com/tendermint/tendermint/issues/9427))
+- `[pubsub]` Added support for big integers and big floats in the pubsub event query system.
+  Breaking change: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
+  ([\#797](https://github.com/cometbft/cometbft/pull/797))
+- `[kvindexer]` Added support for big integers and big floats in the kvindexer.
+  Breaking change: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
+  ([\#797](https://github.com/cometbft/cometbft/pull/797))
+- `[mempool]` The application can now set `ConsensusParams.Block.MaxBytes` to -1
+  to have visibility on all transactions in the
+  mempool at `PrepareProposal` time.
+  This means that the total size of transactions sent via `RequestPrepareProposal`
+  might exceed `RequestPrepareProposal.max_tx_bytes`.
+  If that is the case, the application MUST make sure that the total size of transactions
+  returned in `ResponsePrepareProposal.txs` does not exceed `RequestPrepareProposal.max_tx_bytes`,
+  otherwise CometBFT will panic (see the sketch after this list).
+  ([\#980](https://github.com/cometbft/cometbft/issues/980))
+- `[node/state]` Add Go API to bootstrap the block store and state store to a height. Make sure block sync starts syncing from the bootstrapped height.
+  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang)
+- `[state/store]` Added Go functions to save the height at which offline state sync is performed.
+  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@jmalicevic)
+- `[p2p]` Remove UPnP functionality
+  ([\#1113](https://github.com/cometbft/cometbft/issues/1113))
+- `[node]` Removed `ConsensusState()` accessor from `Node`
+  struct - all access to consensus state should go via the reactor
+  ([\#1120](https://github.com/cometbft/cometbft/pull/1120))
+- `[state]` Signature of `ExtendVote` changed in `BlockExecutor`.
+  It now includes the block whose precommit will be extended, and the state object.
+  ([\#1270](https://github.com/cometbft/cometbft/pull/1270))
+- `[state]` Move pruneBlocks from node/state to state/execution.
+  ([\#6541](https://github.com/tendermint/tendermint/pull/6541))
+- `[abci]` Move `app_hash` parameter from `Commit` to `FinalizeBlock`
+  ([\#8664](https://github.com/tendermint/tendermint/pull/8664))
+- `[abci]` Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx`
+  and `EndBlock` into a single method call
+  ([\#9468](https://github.com/tendermint/tendermint/pull/9468))
+- `[p2p]` Remove unused p2p/trust package
+  ([\#9625](https://github.com/tendermint/tendermint/pull/9625))
+- `[rpc]` Remove global environment and replace with constructor
+  ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
+- `[node]` Move DBContext and DBProvider from the node package to the config
+  package. ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
+- `[inspect]` Add a new `inspect` command for introspecting
+  the state and block store of a crashed Tendermint node.
+  ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
+- `[metrics]` Move state-syncing and block-syncing metrics to
+  their respective packages. Move labels from block_syncing
+  -> blocksync_syncing and state_syncing -> statesync_syncing
+  ([\#9682](https://github.com/tendermint/tendermint/pull/9682))
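+To make the size requirement above (\#980) concrete, here is a minimal,
+hypothetical sketch of an application-side cap in `PrepareProposal`; the
+`App` type is illustrative, and a real application will have its own
+transaction selection logic:
+
+```go
+package app
+
+import (
+	"context"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+)
+
+// App is an illustrative application type.
+type App struct {
+	abci.BaseApplication
+}
+
+// PrepareProposal trims the transaction list so that the total size of
+// the returned txs never exceeds max_tx_bytes, which is mandatory when
+// ConsensusParams.Block.MaxBytes is set to -1.
+func (app *App) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+	var (
+		txs   [][]byte
+		total int64
+	)
+	for _, tx := range req.Txs {
+		if total+int64(len(tx)) > req.MaxTxBytes {
+			break // keep the proposal within the limit; CometBFT panics otherwise
+		}
+		txs = append(txs, tx)
+		total += int64(len(tx))
+	}
+	return &abci.ResponsePrepareProposal{Txs: txs}, nil
+}
+```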
+
+### BUG FIXES
+
+- `[kvindexer]` Forward-port the fixes done to the kvindexer in 0.37 in PR \#77
+  ([\#423](https://github.com/cometbft/cometbft/pull/423))
+- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error with a `panic`.
+  ([\#496](https://github.com/cometbft/cometbft/pull/496))
+- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race
+  ([\#524](https://github.com/cometbft/cometbft/pull/524))
+- `[light]` Fixed an edge case where a light client would panic when attempting
+  to query a node that (1) has started from a non-zero height and (2) does
+  not yet have any data. The light client will now, correctly, not panic
+  _and_ keep the node in its list of providers in the same way it would if
+  it queried a node starting from height zero that does not yet have data
+  ([\#575](https://github.com/cometbft/cometbft/issues/575))
+- `[abci]` Restore the snake_case naming in JSON serialization of
+  `ExecTxResult` ([\#855](https://github.com/cometbft/cometbft/issues/855))
+- `[consensus]` Avoid recursive call after rename to `(*PeerState).MarshalJSON`
+  ([\#863](https://github.com/cometbft/cometbft/pull/863))
+- `[mempool/clist_mempool]` Prevent a transaction from appearing twice in the mempool
+  ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack)
+- `[docker]` Ensure the Docker image uses a consistent version of Go.
+  ([\#9462](https://github.com/tendermint/tendermint/pull/9462))
+- `[abci-cli]` Fix broken abci-cli help command.
+  ([\#9717](https://github.com/tendermint/tendermint/pull/9717))
+
+### DEPRECATIONS
+
+- `[rpc/grpc]` Mark the gRPC broadcast API as deprecated.
+  It will be superseded by a broader API as part of
+  [\#81](https://github.com/cometbft/cometbft/issues/81)
+  ([\#650](https://github.com/cometbft/cometbft/issues/650))
+
+### FEATURES
+
+- `[node/state]` Add Go API to bootstrap the block store and state store to a height
+  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang)
+- `[proxy]` Introduce `NewConnSyncLocalClientCreator`, which allows local ABCI
+  clients to have the same concurrency model as remote clients (i.e. one mutex
+  per client "connection", for each of the four ABCI "connections").
+  ([tendermint/tendermint\#9830](https://github.com/tendermint/tendermint/pull/9830)
+  and [\#1145](https://github.com/cometbft/cometbft/pull/1145))
+- `[proxy]` Introduce `NewUnsyncLocalClientCreator`, which allows local ABCI
+  clients to have the same concurrency model as remote clients (i.e. one
+  mutex per client "connection", for each of the four ABCI "connections").
+  ([\#9830](https://github.com/tendermint/tendermint/pull/9830))
+- `[abci]` New ABCI methods `VerifyVoteExtension` and `ExtendVote` allow validators to validate the vote extension data attached to a pre-commit message and allow applications to let their validators do more than just validate within consensus ([\#9836](https://github.com/tendermint/tendermint/pull/9836))
+
+### IMPROVEMENTS
+
+- `[blocksync]` Generate new metrics during BlockSync
+  ([\#543](https://github.com/cometbft/cometbft/pull/543))
+- `[jsonrpc/client]` Improve the error message for client errors stemming from
+  bad HTTP responses.
+  ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638))
+- `[rpc]` Remove response data from response failure logs in order
+  to prevent large quantities of log data from being produced
+  ([\#654](https://github.com/cometbft/cometbft/issues/654))
+- `[pubsub/kvindexer]` Numeric query conditions and event values are represented as big floats with a default precision of 125.
+  Integers are read as "big ints" and represented with as many bits as they need when converting to floats.
+  ([\#797](https://github.com/cometbft/cometbft/pull/797))
+- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857))
+- `[consensus]` New metrics (counters) to track duplicate votes and block parts.
+  ([\#896](https://github.com/cometbft/cometbft/pull/896))
+- `[mempool]` The application can now set `ConsensusParams.Block.MaxBytes` to -1
+  to gain more control over the max size of transactions in a block.
+  It also allows the application to have visibility on all transactions in the
+  mempool at `PrepareProposal` time.
+  ([\#980](https://github.com/cometbft/cometbft/pull/980))
+- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee)
+- `[state]` Make logging of `block_app_hash` and `app_hash` consistent by logging them both as hex.
+  ([\#1264](https://github.com/cometbft/cometbft/pull/1264))
+- `[crypto/merkle]` Improve HashAlternatives performance
+  ([\#6443](https://github.com/tendermint/tendermint/pull/6443))
+- `[p2p/pex]` Improve addrBook.hash performance
+  ([\#6509](https://github.com/tendermint/tendermint/pull/6509))
+- `[crypto/merkle]` Improve HashAlternatives performance
+  ([\#6513](https://github.com/tendermint/tendermint/pull/6513))
+- `[pubsub]` Performance improvements for the event query API
+  ([\#7319](https://github.com/tendermint/tendermint/pull/7319))
+
 ## v0.37.0
 
 *March 6, 2023*
 
@@ -38,6 +561,11 @@ See below for more details.
   ([\#230](https://github.com/cometbft/cometbft/pull/230))
 - Bump minimum Go version to 1.20
   ([\#385](https://github.com/cometbft/cometbft/issues/385))
+- `[config]` The boolean key `fastsync` is deprecated and replaced by
+  `block_sync`. ([\#9259](https://github.com/tendermint/tendermint/pull/9259))
+  At the same time, `block_sync` is also deprecated. In the next release,
+  BlockSync will always be enabled and `block_sync` will be removed.
+  ([\#409](https://github.com/cometbft/cometbft/issues/409))
 - `[abci]` Make length delimiter encoding consistent (`uint64`) between ABCI
   and P2P wire-level protocols
   ([\#5783](https://github.com/tendermint/tendermint/pull/5783))
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index c25964180e6..3f93f1e5e83 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,13 +1,13 @@
 # The CometBFT Code of Conduct
 
-This code of conduct applies to all projects run by the CometBFT/Cosmos team and
+This code of conduct applies to all projects run by the CometBFT team and
 hence to CometBFT.
 
 ----
 
 # Conduct
 
-## Contact: conduct@interchain.io
+## Contact: conduct@informal.systems
 
 * We are committed to providing a friendly, safe and welcoming
   environment for all, regardless of level of experience, gender, gender
@@ -35,7 +35,7 @@ hence to CometBFT.
 
 * Private harassment is also unacceptable. No matter who you are, if you feel
   you have been or are being harassed or made uncomfortable by a community
-  member, please contact one of the channel admins or the person mentioned above
+  member, please get in touch with one of the channel admins or the contact address above
   immediately. Whether you’re a regular contributor or a newcomer, we care about
   making this community a safe place for you and we’ve got your back.
 
diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile
index cf997137572..55931c348d0 100644
--- a/DOCKER/Dockerfile
+++ b/DOCKER/Dockerfile
@@ -1,6 +1,6 @@
 # Use a build arg to ensure that both stages use the same,
 # hopefully current, go version.
-ARG GOLANG_BASE_IMAGE=golang:1.20-alpine
+ARG GOLANG_BASE_IMAGE=golang:1.22-alpine
 
 # stage 1 Generate CometBFT Binary
 FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder
diff --git a/DOCKER/README.md b/DOCKER/README.md
index 21f3dd2000b..33a6bd1021a 100644
--- a/DOCKER/README.md
+++ b/DOCKER/README.md
@@ -20,9 +20,9 @@ Respective versioned files can be found at `https://raw.githubusercontent.com/co
 
 CometBFT is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
 
-For more background, see the [the docs](https://docs.cometbft.com/main/introduction/#quick-start).
-To get started developing applications, see the [application developers guide](https://docs.cometbft.com/main/introduction/quick-start.html).
+To get started developing applications, see the [application developers guide](https://docs.cometbft.com/v0.38.x/introduction/quick-start.html).
 
 ## How to use this image
 
@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/cometbft" cometbft/cometbft node --proxy_app=kvsto
 
 ## Local cluster
 
-To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/cometbft/cometbft/blob/main/Makefile) and run:
+To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/cometbft/cometbft/blob/v0.38.x/Makefile) and run:
 
 ```sh
 make build-linux
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.
 
 ## License
 
-- CometBFT's license is [Apache 2.0](https://github.com/cometbft/cometbft/blob/main/LICENSE).
+- CometBFT's license is [Apache 2.0](https://github.com/cometbft/cometbft/blob/v0.38.x/LICENSE).
 
 ## Contributing
 
-Contributions are most welcome! See the [contributing file](https://github.com/cometbft/cometbft/blob/main/CONTRIBUTING.md) for more information.
+Contributions are most welcome! See the [contributing file](https://github.com/cometbft/cometbft/blob/v0.38.x/CONTRIBUTING.md) for more information.
diff --git a/Makefile b/Makefile
index 0d63f4a8dc4..b50ccba40b5 100644
--- a/Makefile
+++ b/Makefile
@@ -68,7 +68,8 @@ ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM)))
 	GOARCH=riscv64
 endif
 
-all: check build test install
+#? all: Run the build, test, and install targets
+all: build test install
 .PHONY: all
 
 include tests.mk
@@ -77,10 +78,12 @@ include tests.mk
 ### Build CometBFT                                                          ###
###############################################################################
 
+#? build: Build CometBFT
 build:
 	CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/cometbft/
 .PHONY: build
 
+#? install: Install CometBFT to GOBIN
 install:
 	CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/cometbft
 .PHONY: install
 
@@ -89,6 +92,7 @@ install:
 ### Metrics                                                                 ###
###############################################################################
 
+#? metrics: Generate metrics
 metrics: testdata-metrics
 	go generate -run="scripts/metricsgen" ./...
 .PHONY: metrics
 
@@ -96,6 +100,7 @@ metrics: testdata-metrics
 # By convention, the go tool ignores subdirectories of directories named
 # 'testdata'. This command invokes the generate command on the folder directly
 # to avoid this.
+#? testdata-metrics: Generate test data for metrics
 testdata-metrics:
 	ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
 .PHONY: testdata-metrics
 
@@ -104,6 +109,7 @@ testdata-metrics:
 ### Mocks                                                                   ###
###############################################################################
 
+#? mockery: Generate test mocks
 mockery:
 	go generate -run="./scripts/mockery_generate.sh" ./...
 .PHONY: mockery
 
@@ -112,57 +118,65 @@ mockery:
 ### Protobuf                                                                ###
###############################################################################
 
+#? check-proto-deps: Check protobuf deps
 check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
	@go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
endif
 .PHONY: check-proto-deps
 
+#? check-proto-format-deps: Check protobuf format deps
 check-proto-format-deps:
ifeq (,$(shell which clang-format))
	$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
 .PHONY: check-proto-format-deps
 
+#? proto-gen: Generate protobuf files
 proto-gen: check-proto-deps
 	@echo "Generating Protobuf files"
-	@go run github.com/bufbuild/buf/cmd/buf generate
+	@go run github.com/bufbuild/buf/cmd/buf@latest generate
 	@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
 	@cp ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc
 .PHONY: proto-gen
 
 # These targets are provided for convenience and are intended for local
 # execution only.
+#? proto-lint: Lint protobuf files
 proto-lint: check-proto-deps
 	@echo "Linting Protobuf files"
-	@go run github.com/bufbuild/buf/cmd/buf lint
+	@go run github.com/bufbuild/buf/cmd/buf@latest lint
 .PHONY: proto-lint
 
+#? proto-format: Format protobuf files
 proto-format: check-proto-format-deps
 	@echo "Formatting Protobuf files"
 	@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
 .PHONY: proto-format
 
+#? proto-check-breaking: Check for breaking changes in Protobuf files against local branch. This is only useful if your changes have not yet been committed
 proto-check-breaking: check-proto-deps
 	@echo "Checking for breaking changes in Protobuf files against local branch"
 	@echo "Note: This is only useful if your changes have not yet been committed."
 	@echo "      Otherwise read up on buf's \"breaking\" command usage:"
 	@echo "      https://docs.buf.build/breaking/usage"
-	@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
+	@go run github.com/bufbuild/buf/cmd/buf@latest breaking --against ".git"
 .PHONY: proto-check-breaking
 
 proto-check-breaking-ci:
-	@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
+	@go run github.com/bufbuild/buf/cmd/buf@latest breaking --against $(HTTPS_GIT)#branch=v0.34.x
 .PHONY: proto-check-breaking-ci
 
###############################################################################
 ### Build ABCI                                                              ###
###############################################################################
 
+#? build_abci: Build abci
 build_abci:
 	@go build -mod=readonly -i ./abci/cmd/...
 .PHONY: build_abci
 
+#? install_abci: Install abci
 install_abci:
 	@go install -mod=readonly ./abci/cmd/...
 .PHONY: install_abci
 
@@ -173,20 +187,24 @@ install_abci:
 
 # dist builds binaries for all platforms and packages them for distribution
 # TODO add abci to these scripts
+#? dist: Build binaries for all platforms and package them for distribution
 dist:
 	@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"
 .PHONY: dist
 
+#? go-mod-cache: Download go modules to local cache
 go-mod-cache: go.sum
 	@echo "--> Download go modules to local cache"
 	@go mod download
 .PHONY: go-mod-cache
 
+#? go.sum: Ensure dependencies have not been modified
 go.sum: go.mod
 	@echo "--> Ensure dependencies have not been modified"
 	@go mod verify
 	@go mod tidy
 
+#? draw_deps: Generate deps graph
 draw_deps:
 	@# requires brew install graphviz or apt-get install graphviz
 	go get github.com/RobotsAndPencils/goviz
@@ -204,7 +222,7 @@ get_deps_bin_size:
 ### Libs                                                                    ###
###############################################################################
 
-# generates certificates for TLS testing in remotedb and RPC server
+#? gen_certs: Generate certificates for TLS testing in remotedb and RPC server
 gen_certs: clean_certs
 	certstrap init --common-name "cometbft.com" --passphrase ""
 	certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase ""
@@ -214,7 +232,7 @@ gen_certs: clean_certs
 	rm -rf out
 .PHONY: gen_certs
 
-# deletes generated certificates
+#? clean_certs: Delete generated certificates
 clean_certs:
 	rm -f rpc/jsonrpc/server/test.crt
 	rm -f rpc/jsonrpc/server/test.key
@@ -229,15 +247,35 @@ format:
 	find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/cometbft/cometbft
 .PHONY: format
 
+#? lint: Run latest golangci-lint linter
 lint:
 	@echo "--> Running linter"
-	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
+	@go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run
 .PHONY: lint
 
+# https://github.com/cometbft/cometbft/pull/1925#issuecomment-1875127862
+# Revisit using lint-format after CometBFT v1 release and/or after 2024-06-01.
+#lint-format:
+#	@go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run --fix
+#	@go run mvdan.cc/gofumpt -l -w ./..
+#.PHONY: lint-format
+
+#? vulncheck: Run latest govulncheck
 vulncheck:
 	@go run golang.org/x/vuln/cmd/govulncheck@latest ./...
 .PHONY: vulncheck
 
+#? lint-typo: Run codespell to check typos
+lint-typo:
+	which codespell || pip3 install codespell
+	@codespell
+.PHONY: lint-typo
+
+#? lint-fix-typo: Run codespell to auto-fix typos
+lint-fix-typo:
+	@codespell -w
+.PHONY: lint-fix-typo
+
 DESTINATION = ./index.html.md
 
@@ -245,7 +283,7 @@ DESTINATION = ./index.html.md
 ### Documentation                                                           ###
###############################################################################
 
-# Verify that important design docs have ToC entries.
+#? check-docs-toc: Verify that important design docs have ToC entries.
 check-docs-toc:
 	@./docs/presubmit.sh
 .PHONY: check-docs-toc
 
@@ -256,6 +294,7 @@ check-docs-toc:
 
 # On Linux, you may need to run `DOCKER_BUILDKIT=1 make build-docker` for this
 # to work.
+#? build-docker: Build docker image cometbft/cometbft
 build-docker:
 	docker build \
 		--label=cometbft \
@@ -267,11 +306,12 @@ build-docker:
 ### Local testnet using docker                                              ###
###############################################################################
 
-# Build linux binary on other platforms
+#? build-linux: Build linux binary on other platforms
 build-linux:
 	GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build
 .PHONY: build-linux
 
+#? build-docker-localnode: Build the "localnode" docker image
 build-docker-localnode:
 	@cd networks/local && make
 .PHONY: build-docker-localnode
 
@@ -284,18 +324,18 @@ build_c-amazonlinux:
 	docker run --rm -it -v `pwd`:/cometbft cometbft/cometbft:build_c-amazonlinux
 .PHONY: build_c-amazonlinux
 
-# Run a 4-node testnet locally
+#? localnet-start: Run a 4-node testnet locally
 localnet-start: localnet-stop build-docker-localnode
 	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/cometbft:Z cometbft/localnode testnet --config /etc/cometbft/config-template.toml --o . --starting-ip-address 192.167.10.2; fi
-	docker-compose up
+	docker compose up -d
 .PHONY: localnet-start
 
-# Stop testnet
+#? localnet-stop: Stop testnet
 localnet-stop:
-	docker-compose down
+	docker compose down
 .PHONY: localnet-stop
 
-# Build hooks for dredd, to skip or add information on some steps
+#? build-contract-tests-hooks: Build hooks for dredd, to skip or add information on some steps
 build-contract-tests-hooks:
ifeq ($(OS),Windows_NT)
	go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests
@@ -304,7 +344,7 @@ else
endif
 .PHONY: build-contract-tests-hooks
 
-# Run a nodejs tool to test endpoints against a localnet
+#? contract-tests: Run a nodejs tool to test endpoints against a localnet
 # The command takes care of starting and stopping the network
 # prerequisites: build-contract-tests-hooks build-linux
 # the two build commands are not included so that this command can run from generic containers or machines.
@@ -334,3 +374,9 @@ split-test-packages:$(BUILDDIR)/packages.txt
 	split -d -n l/$(NUM_SPLIT) $< $<.
 test-group-%:split-test-packages
 	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out
+
+#? help: Get more info on make commands.
+help: Makefile
+	@echo "  Choose a command to run in cometbft:"
+	@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/  /'
+.PHONY: help
diff --git a/README.md b/README.md
index 2d39be1faa3..ca1b4462ad3 100644
--- a/README.md
+++ b/README.md
@@ -10,11 +10,12 @@
 [![License][license-badge]][license-url]
 [![Sourcegraph][sg-badge]][sg-url]
 
-| Branch  | Tests                                    | Linting                               |
-|---------|------------------------------------------|---------------------------------------|
-| main    | [![Tests][tests-badge]][tests-url]       | [![Lint][lint-badge]][lint-url]       |
-| v0.37.x | [![Tests][tests-badge-v037x]][tests-url] | [![Lint][lint-badge-v037x]][lint-url] |
-| v0.34.x | [![Tests][tests-badge-v034x]][tests-url] | [![Lint][lint-badge-v034x]][lint-url] |
+| Branch  | Tests                                          | Linting                                     |
+|---------|------------------------------------------------|---------------------------------------------|
+| main    | [![Tests][tests-badge]][tests-url]             | [![Lint][lint-badge]][lint-url]             |
+| v0.38.x | [![Tests][tests-badge-v038x]][tests-url-v038x] | [![Lint][lint-badge-v038x]][lint-url-v038x] |
+| v0.37.x | [![Tests][tests-badge-v037x]][tests-url-v037x] | [![Lint][lint-badge-v037x]][lint-url-v037x] |
+| v0.34.x | [![Tests][tests-badge-v034x]][tests-url-v034x] | [![Lint][lint-badge-v034x]][lint-url-v034x] |
 
 CometBFT is a Byzantine Fault Tolerant (BFT) middleware that takes a state
 transition machine - written in any programming language - and securely
@@ -39,14 +40,15 @@ Complete documentation can be found on the
 Please do not depend on `main` as your production branch. Use
 [releases](https://github.com/cometbft/cometbft/releases) instead.
 
-We haven't released v1.0 yet
-since we are making breaking changes to the protocol and the APIs. See below for
-more details about [versioning](#versioning).
+If you intend to run CometBFT in production, we're happy to help. To contact
+us, in order of preference:
 
-In any case, if you intend to run CometBFT in production, we're happy to help.
-
-To contact us, you can also
-[join the chat](https://discord.com/channels/669268347736686612/669283915743232011).
+- [Create a new discussion on
+  GitHub](https://github.com/cometbft/cometbft/discussions)
+- Reach out to us via [Telegram](https://t.me/CometBFT)
+- [Join the Cosmos Network Discord](https://discord.gg/interchain) and
+  discuss in
+  [`#cometbft`](https://discord.com/channels/669268347736686612/1069933855307472906)
 
 More on how releases are conducted can be found [here](./RELEASES.md).
 
@@ -60,9 +62,10 @@ looking for, see [our security policy](SECURITY.md).
 | CometBFT version | Requirement | Notes             |
 |------------------|-------------|-------------------|
-| main             | Go version  | Go 1.20 or higher |
-| v0.37.x          | Go version  | Go 1.20 or higher |
-| v0.34.x          | Go version  | Go 1.19 or higher |
+| main             | Go version  | Go 1.22 or higher |
+| v0.38.x          | Go version  | Go 1.22 or higher |
+| v0.37.x          | Go version  | Go 1.22 or higher |
+| v0.34.x          | Go version  | Go 1.19 or higher |
 
 ### Install
 
@@ -120,6 +123,8 @@ CometBFT up-to-date. Upgrading instructions can be found in
 
 Currently supported versions include:
 
+- v0.38.x: CometBFT v0.38 introduces ABCI 2.0, which implements the entirety of
+  ABCI++
 - v0.37.x: CometBFT v0.37 introduces ABCI 1.0, which is the first major step
   towards the full ABCI++ implementation in ABCI 2.0
 - v0.34.x: The CometBFT v0.34 series is compatible with the Tendermint Core
@@ -145,7 +150,7 @@ Currently supported versions include:
 ### Research
 
 Below are links to the original Tendermint consensus algorithm and relevant
-whitepapers which CosmosBFT will continue to build on.
+whitepapers which CometBFT will continue to build on.
 
 - [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
 - [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
@@ -168,20 +173,28 @@ maintains [cometbft.com](https://cometbft.com).
 [version-url]: https://github.com/cometbft/cometbft/releases/latest
 [api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
 [api-url]: https://pkg.go.dev/github.com/cometbft/cometbft
-[go-badge]: https://img.shields.io/badge/go-1.20-blue.svg
+[go-badge]: https://img.shields.io/badge/go-1.22-blue.svg
 [go-url]: https://github.com/moovweb/gvm
 [discord-badge]: https://img.shields.io/discord/669268347736686612.svg
-[discord-url]: https://discord.gg/cosmosnetwork
+[discord-url]: https://discord.gg/interchain
 [license-badge]: https://img.shields.io/github/license/cometbft/cometbft.svg
 [license-url]: https://github.com/cometbft/cometbft/blob/main/LICENSE
 [sg-badge]: https://sourcegraph.com/github.com/cometbft/cometbft/-/badge.svg
 [sg-url]: https://sourcegraph.com/github.com/cometbft/cometbft?badge
 [tests-url]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml
+[tests-url-v038x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.38.x
+[tests-url-v037x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.37.x
+[tests-url-v034x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.34.x
 [tests-badge]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=main
+[tests-badge-v038x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.38.x
 [tests-badge-v037x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.37.x
 [tests-badge-v034x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.34.x
 [lint-badge]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=main
 [lint-badge-v034x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.34.x
 [lint-badge-v037x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.37.x
+[lint-badge-v038x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.38.x
 [lint-url]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml
+[lint-url-v034x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.34.x
+[lint-url-v037x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.37.x
+[lint-url-v038x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.38.x
 [tm-core]: https://github.com/tendermint/tendermint
diff --git a/RELEASES.md b/RELEASES.md
index 0122b113fe6..a4ca108a993 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -93,6 +93,15 @@ the 0.38.x line.
 Be sure to merge this PR before making other changes on the newly-created
 backport branch.
 
+5. Ensure that the RPC docs' `version` field in `rpc/openapi/openapi.yaml` has
+   been updated from `main` to the backport branch version.
+
+6. Prepare the [CometBFT documentation
+   repository](https://github.com/cometbft/cometbft-docs) to build the release
+   branch's version by updating the
+   [VERSIONS](https://github.com/cometbft/cometbft-docs/blob/main/VERSIONS)
+   file.
+
 After doing these steps, go back to `main` and do the following:
 
 1. Create a new workflow to run e2e nightlies for the new backport branch. (See
@@ -148,7 +157,7 @@ backport branch (see above). Otherwise:
 
 1. Start from the backport branch (e.g. `v0.38.x`).
 2. Run the integration tests and the E2E nightlies (which can be triggered
    from the GitHub UI;
-   e.g., ).
+   e.g., ).
 3. Prepare the pre-release documentation:
    * Build the changelog with [unclog] _without_ doing an unclog release, and
     commit the built changelog. This ensures that all changelog entries appear
@@ -316,8 +325,8 @@ and limitation that real-world deployments of CometBFT experience in production
 
 #### 200 Node Testnet
 
 To test the stability and performance of CometBFT in a real world scenario,
-a 200 node test network is run. The network comprises 5 seed nodes, 100
-validators and 95 non-validating full nodes. All nodes begin by dialing
+a 200 node test network is run. The network comprises 5 seed nodes, 175
+validators and 20 non-validating full nodes. All nodes begin by dialing
 a subset of the seed nodes to discover peers. The network is run for several
 days, with metrics being collected continuously. In cases of changes to
 performance critical systems, testnets of larger sizes should be considered.
@@ -333,6 +342,20 @@ to blocksync to the head of the chain and begins producing blocks using
 consensus it is stopped. Once stopped, a new node is started and takes its
 place. This network is run for several days.
 
+#### Vote-extension Testnet
+
+CometBFT v0.38.0 introduced **vote extensions**, which, as the name suggests, are added to precommit votes sent by validators.
+The Vote-extension Testnet is used to determine how vote extensions affect the performance of CometBFT under various settings.
+The application used in the experiment is the same as the one used in the [200 Node Testnet](#200-node-testnet), but it is configured differently to gauge the effects of varying vote extension sizes.
+In the [200 Node Testnet](#200-node-testnet) the application extends pre-commit votes with a 64-bit number encoded with variable compression.
+In the Vote-extension Testnet, pre-commit votes are extended with a non-compressed extension of configurable size.
+Experiments are run with multiple sizes to determine their impact and, for comparison's sake, we include a run with the same settings as in the [200 Node Testnet](#200-node-testnet).
+
+The testnet consists of 175 validators, 20 non-validator full nodes, and 5 seed nodes.
+All 195 full nodes begin by dialing a subset of the seed nodes to discover peers.
+Once all full nodes are started, there is a 5-minute waiting period before an experiment starts.
+For each experiment, the load generators issue requests at a constant rate for 150 seconds, then wait for 5 minutes to allow the system to quiesce, and then repeat the load generation; the load generation step is repeated 5 times for each experiment.
+
 #### Network Partition Testnet
 
 CometBFT is expected to recover from network partitions. A partition where no
diff --git a/SECURITY.md b/SECURITY.md
index 01b989c6b1f..2a5c5666415 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,208 +1,33 @@
-# Security
+# How to Report a Security Bug
 
-## Reporting a Bug
+If you believe you have found a security vulnerability in the Interchain Stack,
+you can report it to our primary vulnerability disclosure channel, the [Cosmos
+HackerOne Bug Bounty program][h1].
 
-As part of our Coordinated Vulnerability Disclosure Policy (link will be added
-once this policy is finalized for CometBFT), we operate a [bug
-bounty][hackerone]. See the policy for more details on submissions and rewards,
-and see "Example Vulnerabilities" (below) for examples of the kinds of bugs
-we're most interested in.
+If you prefer to report an issue via email, you may send a bug report to
+ with the issue details, reproduction, impact, and other
+information. Please submit only one unique email thread per vulnerability. Any
+issues reported via email are ineligible for bounty rewards.
 
-### Guidelines
+Artifacts from an email report are saved at the time the email is triaged.
+Please note: our team is not able to monitor dynamic content (e.g. a Google Docs
+link that is edited after receipt) throughout the lifecycle of a report. If you
+would like to share additional information or modify previous information,
+please include it in an additional reply as an additional attachment.
 
-We require that all researchers:
+Please **DO NOT** file a public issue in this repository to report a security
+vulnerability.
 
-* Use the bug bounty to disclose all vulnerabilities, and avoid posting
-  vulnerability information in public places, including GitHub Issues, Discord
-  channels, and Telegram groups
-* Make every effort to avoid privacy violations, degradation of user experience,
-  disruption to production systems (including but not limited to the Cosmos
-  Hub), and destruction of data
-* Keep any information about vulnerabilities that you’ve discovered confidential
-  between yourself and the CometBFT engineering team until the issue has been
-  resolved and disclosed
-* Avoid posting personally identifiable information, privately or publicly
+## Coordinated Vulnerability Disclosure Policy and Safe Harbor
 
-If you follow these guidelines when reporting an issue to us, we commit to:
+For the most up-to-date version of the policies that govern vulnerability
+disclosure, please consult the [HackerOne program page][h1-policy].
 
-* Not pursue or support any legal action related to your research on this
-  vulnerability
-* Work with you to understand, resolve and ultimately disclose the issue in a
-  timely fashion
+The policy hosted on HackerOne is the official Coordinated Vulnerability
+Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and
+infrastructure it supports, and it supersedes previous security policies that
+have been used in the past by individual teams and projects with targets in
+scope of the program.
-## Disclosure Process - -CometBFT uses the following disclosure process: - -1. Once a security report is received, the CometBFT team works to verify the - issue and confirm its severity level using CVSS. -2. The CometBFT team collaborates with the Gaia team to determine the - vulnerability’s potential impact on the Cosmos Hub. -3. Patches are prepared for eligible releases of CometBFT in private - repositories. See “Supported Releases” below for more information on which - releases are considered eligible. -4. If it is determined that a CVE-ID is required, we request a CVE through a CVE - Numbering Authority. -5. We notify the community that a security release is coming, to give users time - to prepare their systems for the update. Notifications can include forum - posts, tweets, and emails to partners and validators. -6. 24 hours following this notification, the fixes are applied publicly and new - releases are issued. -7. Cosmos SDK and Gaia update their CometBFT dependencies to use these releases, - and then themselves issue new releases. -8. Once releases are available for CometBFT, Cosmos SDK and Gaia, we notify the - community, again, through the same channels as above. We also publish a - Security Advisory on GitHub and publish the CVE, as long as neither the - Security Advisory nor the CVE include any information on how to exploit these - vulnerabilities beyond what information is already available in the patch - itself. -9. Once the community is notified, we will pay out any relevant bug bounties to - submitters. -10. One week after the releases go out, we will publish a post with further - details on the vulnerability as well as our response to it. - -This process can take some time. Every effort will be made to handle the bug in -as timely a manner as possible, however it's important that we follow the -process described above to ensure that disclosures are handled consistently and -to keep CometBFT and its downstream dependent projects--including but not -limited to Gaia and the Cosmos Hub--as secure as possible. - -### Example Timeline - -The following is an example timeline for the triage and response. The required -roles and team members are described in parentheses after each task; however, -multiple people can play each role and each person may play multiple roles. - -#### 24+ Hours Before Release Time - -1. Request CVE number (ADMIN) -2. Gather emails and other contact info for validators (COMMS LEAD) -3. Create patches in a private security repo, and ensure that PRs are open - targeting all relevant release branches (CometBFT ENG, CometBFT LEAD) -4. Test fixes on a testnet (CometBFT ENG, COSMOS SDK ENG) -5. Write “Security Advisory” for forum (CometBFT LEAD) - -#### 24 Hours Before Release Time - -1. Post “Security Advisory” pre-notification on forum (CometBFT LEAD) -2. Post Tweet linking to forum post (COMMS LEAD) -3. Announce security advisory/link to post in various other social channels - (Telegram, Discord) (COMMS LEAD) -4. Send emails to validators or other users (PARTNERSHIPS LEAD) - -#### Release Time - -1. Cut CometBFT releases for eligible versions (CometBFT ENG, CometBFT - LEAD) -2. Cut Cosmos SDK release for eligible versions (COSMOS ENG) -3. Cut Gaia release for eligible versions (GAIA ENG) -4. Post “Security releases” on forum (CometBFT LEAD) -5. Post new Tweet linking to forum post (COMMS LEAD) -6. Remind everyone via social channels (Telegram, Discord) that the release is - out (COMMS LEAD) -7. Send emails to validators or other users (COMMS LEAD) -8. 
Publish Security Advisory and CVE, if CVE has no sensitive information - (ADMIN) - -#### After Release Time - -1. Write forum post with exploit details (CometBFT LEAD) -2. Approve pay-out on HackerOne for submitter (ADMIN) - -#### 7 Days After Release Time - -1. Publish CVE if it has not yet been published (ADMIN) -2. Publish forum post with exploit details (CometBFT ENG, CometBFT LEAD) - -## Supported Releases - -The CometBFT team commits to releasing security patch releases for both -the latest minor release as well for the major/minor release that the Cosmos Hub -is running. - -If you are running older versions of CometBFT, we encourage you to -upgrade at your earliest opportunity so that you can receive security patches -directly from the CometBFT repo. While you are welcome to backport security -patches to older versions for your own use, we will not publish or promote these -backports. - -## Scope - -The full scope of our bug bounty program is outlined on our -[Hacker One program page][hackerone]. Please also note that, in the interest of -the safety of our users and staff, a few things are explicitly excluded from -scope: - -* Any third-party services -* Findings from physical testing, such as office access -* Findings derived from social engineering (e.g., phishing) - -## Example Vulnerabilities - -The following is a list of examples of the kinds of vulnerabilities that we’re -most interested in. It is not exhaustive: there are other kinds of issues we may -also be interested in! - -### Specification - -* Conceptual flaws -* Ambiguities, inconsistencies, or incorrect statements -* Mis-match between specification and implementation of any component - -### Consensus - -Assuming less than 1/3 of the voting power is Byzantine (malicious): - -* Validation of blockchain data structures, including blocks, block parts, - votes, and so on -* Execution of blocks -* Validator set changes -* Proposer round robin -* Two nodes committing conflicting blocks for the same height (safety failure) -* A correct node signing conflicting votes -* A node halting (liveness failure) -* Syncing new and old nodes - -Assuming more than 1/3 the voting power is Byzantine: - -* Attacks that go unpunished (unhandled evidence) - -### Networking - -* Authenticated encryption (MITM, information leakage) -* Eclipse attacks -* Sybil attacks -* Long-range attacks -* Denial-of-Service - -### RPC - -* Write-access to anything besides sending transactions -* Denial-of-Service -* Leakage of secrets - -### Denial-of-Service - -Attacks may come through the P2P network or the RPC layer: - -* Amplification attacks -* Resource abuse -* Deadlocks and race conditions - -### Libraries - -* Serialization -* Reading/Writing files and databases - -### Cryptography - -* Elliptic curves for validator signatures -* Hash algorithms and Merkle trees for block validation -* Authenticated encryption for P2P connections - -### Light Client - -* Core verification -* Bisection/sequential algorithms - -[hackerone]: https://hackerone.com/cosmos +[h1]: https://hackerone.com/cosmos?type=team +[h1-policy]: https://hackerone.com/cosmos?type=team&view_policy=true diff --git a/UPGRADING.md b/UPGRADING.md index cec35029b87..dfec44f8deb 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,40 +2,35 @@ This guide provides instructions for upgrading to specific versions of CometBFT. -## Unreleased +## v0.38.13 + +It is recommended that CometBFT be built with Go v1.22+ since v1.21 is no longer +supported. 
+
+## v0.38.0
+
+This release introduces state machine-breaking changes, as well as substantial
+changes to the ABCI interface and indexing. It therefore requires a
+coordinated upgrade.
 
 ### Config Changes
 
-* A new config field, `BootstrapPeers` has been introduced as a means of
-  adding a list of addresses to the addressbook upon initializing a node. This is an
-  alternative to `PersistentPeers`. `PersistentPeers` shold be only used for
-  nodes that you want to keep a constant connection with i.e. sentry nodes
 * The field `Version` in the mempool section has been removed. The priority
   mempool (what was called version `v1`) has been removed (see below), thus
   there is only one implementation of the mempool available (what was called
   `v0`).
-* Config fields `TTLDuration` and `TTLNumBlocks`, which were only used by the priority
-  mempool, have been removed.
+* Config fields `TTLDuration` and `TTLNumBlocks`, which were only used by the
+  priority mempool, have been removed.
 
 ### Mempool Changes
 
 * The priority mempool (what was referred in the code as version `v1`) has been
   removed. There is now only one mempool (what was called version `v0`), that
-  is, the default implementation as a queue of transactions.
+  is, the default implementation as a queue of transactions.
 * In the protobuf message `ResponseCheckTx`, fields `sender`, `priority`, and
   `mempool_error`, which were only used by the priority mempool, were removed
   but still kept in the message as "reserved".
 
-## v0.37.0
-
-This release introduces state machine-breaking changes, and therefore requires a
-coordinated upgrade.
-
-### Go API
-
-When upgrading from the v0.34 release series, please note that the Go module has
-now changed to `github.com/cometbft/cometbft`.
-
 ### ABCI Changes
 
 * The `ABCIVersion` is now `2.0.0`.
@@ -43,14 +38,53 @@ now changed to `github.com/cometbft/cometbft`.
   Applications upgrading to v0.38.0 must implement these methods as described
   [here](./spec/abci/abci%2B%2B_comet_expected_behavior.md#adapting-existing-applications-that-use-abci)
 * Removed methods `BeginBlock`, `DeliverTx`, `EndBlock`, and replaced them by
-  method `FinalizeBlock`. Applications upgrading to v0.38.0 must refactor
+  method `FinalizeBlock`. Applications upgrading to `v0.38.0` must refactor
   the logic handling the methods removed to handle `FinalizeBlock`.
 * The Application's hash (or any data representing the Application's current state)
   is known by the time `FinalizeBlock` finishes its execution.
   Accordingly, the `app_hash` parameter has been moved from `ResponseCommit`
   to `ResponseFinalizeBlock`.
-* For details, please see the updated [specification](spec/abci/README.md)
+* Field `signed_last_block` in structure `VoteInfo` has been replaced by the
+  more expressive `block_id_flag`. Applications willing to keep the semantics
+  of `signed_last_block` can now use the following predicate (see the sketch
+  below):
+  * `voteInfo.block_id_flag != BlockIDFlagAbsent`
+* For further details, please see the updated [specification](spec/abci/README.md)
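+
+As a rough sketch (illustrative only: the `signersOfLastBlock` helper is
+hypothetical, not a CometBFT API), the predicate can be applied to the
+votes in `DecidedLastCommit` inside the application's `FinalizeBlock`:
+
+```go
+package app
+
+import (
+	abci "github.com/cometbft/cometbft/abci/types"
+	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+)
+
+// signersOfLastBlock recovers the old signed_last_block semantics from
+// the new block_id_flag for each vote in DecidedLastCommit.
+func signersOfLastBlock(req *abci.RequestFinalizeBlock) []abci.Validator {
+	var signers []abci.Validator
+	for _, vote := range req.DecidedLastCommit.Votes {
+		if vote.BlockIdFlag != cmtproto.BlockIDFlagAbsent {
+			signers = append(signers, vote.Validator)
+		}
+	}
+	return signers
+}
+```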
+
+## v0.37.0
+
+This release introduces state machine-breaking changes, and therefore requires a
+coordinated upgrade.
+
+### Go API
+
+When upgrading from the v0.34 release series, please note that the Go module has
+now changed to `github.com/cometbft/cometbft`.
+
+### ABCI Changes
+* The `ABCIVersion` is now `1.0.0`.
+* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details,
+  please see the [spec](spec/abci/README.md). Applications upgrading to
+  v0.37.0 must implement these methods, at the very minimum, as described
+  [here](./spec/abci/abci++_app_requirements.md)
+* Deduplicated `ConsensusParams` and `BlockParams`.
+  In the v0.34 branch they are defined both in `abci/types.proto` and `types/params.proto`.
+  The definitions in `abci/types.proto` have been removed.
+  In-process applications should make sure they are not using the deleted
+  version of those structures.
+* In v0.34, messages on the wire used to be length-delimited with `int64` varint
+  values, which was inconsistent with the `uint64` varint length delimiters used
+  in the P2P layer. Both now consistently use `uint64` varint length delimiters.
+* Added `AbciVersion` to `RequestInfo`.
+  Applications should check that CometBFT's ABCI version matches the one they expect
+  in order to ensure compatibility.
+* The `SetOption` method has been removed from the ABCI `Client` interface.
+  The corresponding Protobuf types have been deprecated.
+* The `key` and `value` fields in the `EventAttribute` type have been changed
+  from type `bytes` to `string`. As per the [Protocol Buffers updating
+  guidelines](https://developers.google.com/protocol-buffers/docs/proto3#updating),
+  this should have no effect on the wire-level encoding for UTF8-encoded
+  strings.
 
 ### RPC
 
@@ -93,7 +127,7 @@ on instead of `~/.tendermint`.
 ### Environment variables
 
 The environment variable prefixes have now changed from `TM` to `CMT`. For
-example, `TMHOME` or `TM_HOME` become `CMTHOME` or `CMT_HOME`.
+example, `TMHOME` becomes `CMTHOME`.
 
 We have implemented a fallback check in case `TMHOME` is still set and
 `CMTHOME` is not, but you will start to see a warning message in the logs if the
 old
diff --git a/abci/README.md b/abci/README.md
index e83e61d42a8..fbe09878ae6 100644
--- a/abci/README.md
+++ b/abci/README.md
@@ -18,7 +18,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
 
 A detailed description of the ABCI methods and message types is contained in:
 
-- [The main spec](https://github.com/cometbft/cometbft/blob/main/spec/abci/README.md)
+- [The main spec](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/README.md)
 - [A protobuf file](../proto/tendermint/types/types.proto)
 - [A Go interface](./types/application.go)
 
diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go
index cd8f61d3f95..df78ea34710 100644
--- a/abci/client/grpc_client.go
+++ b/abci/client/grpc_client.go
@@ -49,7 +49,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client {
 	return cli
 }
 
-func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
+func dialerFunc(_ context.Context, addr string) (net.Conn, error) {
 	return cmtnet.Connect(addr)
 }
 
@@ -87,7 +87,7 @@ func (cli *grpcClient) OnStart() error {
 
 RETRY_LOOP:
 	for {
-		conn, err := grpc.Dial(cli.addr,
+		conn, err := grpc.NewClient(cli.addr,
 			grpc.WithTransportCredentials(insecure.NewCredentials()),
 			grpc.WithContextDialer(dialerFunc),
 		)
@@ -202,7 +202,7 @@ func (cli *grpcClient) Query(ctx context.Context, req *types.RequestQuery) (*typ
 	return cli.client.Query(ctx, types.ToRequestQuery(req).GetQuery(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
+func (cli *grpcClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) {
 	return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
 }
 
diff --git a/abci/client/grpc_client_test.go
b/abci/client/grpc_client_test.go index ac866d39b64..ff57d782bf8 100644 --- a/abci/client/grpc_client_test.go +++ b/abci/client/grpc_client_test.go @@ -75,6 +75,6 @@ func TestGRPC(t *testing.T) { } } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 6494bdb5db7..18d4d65e736 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -23,6 +23,10 @@ type Client struct { func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + var r0 *types.ResponseApplySnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { @@ -49,6 +53,10 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *types.ResponseCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { @@ -75,6 +83,10 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTxAsync") + } + var r0 *abcicli.ReqRes var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*abcicli.ReqRes, error)); ok { @@ -101,6 +113,10 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) ( func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.ResponseCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) (*types.ResponseCommit, error)); ok { @@ -127,6 +143,10 @@ func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types. 
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Echo") + } + var r0 *types.ResponseEcho var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*types.ResponseEcho, error)); ok { @@ -153,6 +173,10 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er func (_m *Client) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -167,6 +191,10 @@ func (_m *Client) Error() error { func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + var r0 *types.ResponseExtendVote var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { @@ -193,6 +221,10 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 *types.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { @@ -219,6 +251,10 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB func (_m *Client) Flush(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Flush") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -233,6 +269,10 @@ func (_m *Client) Flush(_a0 context.Context) error { func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Info") + } + var r0 *types.ResponseInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { @@ -259,6 +299,10 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + var r0 *types.ResponseInitChain var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { @@ -285,6 +329,10 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (* func (_m *Client) IsRunning() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsRunning") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -299,6 +347,10 @@ func (_m *Client) IsRunning() bool { func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + var r0 *types.ResponseListSnapshots var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok 
{ @@ -325,6 +377,10 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + var r0 *types.ResponseLoadSnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { @@ -351,6 +407,10 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + var r0 *types.ResponseOfferSnapshot var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { @@ -377,6 +437,10 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap func (_m *Client) OnReset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnReset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -391,6 +455,10 @@ func (_m *Client) OnReset() error { func (_m *Client) OnStart() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnStart") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -410,6 +478,10 @@ func (_m *Client) OnStop() { func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } + var r0 *types.ResponsePrepareProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { @@ -436,6 +508,10 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 *types.ResponseProcessProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { @@ -462,6 +538,10 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcess func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 *types.ResponseQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { @@ -488,6 +568,10 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re func (_m *Client) Quit() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Quit") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -504,6 +588,10 @@ func (_m *Client) Quit() <-chan struct{} { func (_m *Client) Reset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reset") + 
} + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -528,6 +616,10 @@ func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { func (_m *Client) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -542,6 +634,10 @@ func (_m *Client) Start() error { func (_m *Client) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -556,6 +652,10 @@ func (_m *Client) Stop() error { func (_m *Client) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -570,6 +670,10 @@ func (_m *Client) String() string { func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 *types.ResponseVerifyVoteExtension var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { @@ -592,13 +696,12 @@ func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVer return r0, r1 } -type mockConstructorTestingTNewClient interface { +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t mockConstructorTestingTNewClient) *Client { +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index ccac4bbab19..47382e31292 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -169,7 +169,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) { return } - var res = &types.Response{} + res := &types.Response{} err := types.ReadMessage(r, res) if err != nil { cli.stopForError(fmt.Errorf("read message: %w", err)) @@ -291,7 +291,7 @@ func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*t return reqRes.Response.GetQuery(), cli.Error() } -func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { +func (cli *socketClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit()) if err != nil { return nil, err diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index ab37588e0df..f4bade22934 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -122,7 +122,8 @@ func TestBulk(t *testing.T) { } func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abcicli.Client) { + service.Service, abcicli.Client, +) { t.Helper() // some port between 20k and 30k @@ -156,7 +157,7 @@ type slowApp struct { types.BaseApplication } -func (slowApp) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (slowApp) CheckTx(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error) { time.Sleep(time.Second) return &types.ResponseCheckTx{}, nil } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 290474993e6..17d9230105d 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -51,7 +51,6 @@ var RootCmd = &cobra.Command{ Short: "the ABCI CLI tool wraps an ABCI client", Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - switch cmd.Use { case "kvstore", "version", "help [command]": return nil @@ -196,6 +195,7 @@ var echoCmd = &cobra.Command{ Args: cobra.ExactArgs(1), RunE: cmdEcho, } + var infoCmd = &cobra.Command{ Use: "info", Short: "get some info about the application", @@ -281,7 +281,6 @@ var testCmd = &cobra.Command{ // Generates new Args array based off of previous call args to maintain flag persistence func persistentArgs(line []byte) []string { - // generate the arguments to run from original os.Args // to maintain flag arguments args := os.Args @@ -308,7 +307,7 @@ func compose(fs []func() error) error { return err } -func cmdTest(cmd *cobra.Command, args []string) error { +func cmdTest(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() return compose( []func() error{ @@ -361,7 +360,7 @@ func cmdTest(cmd *cobra.Command, args []string) error { }) } -func cmdBatch(cmd *cobra.Command, args []string) error { +func cmdBatch(cmd *cobra.Command, _ []string) error { bufReader := bufio.NewReader(os.Stdin) LOOP: for { @@ -387,7 +386,7 @@ LOOP: return nil } -func cmdConsole(cmd *cobra.Command, args []string) error { +func cmdConsole(cmd *cobra.Command, _ []string) error { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) @@ -695,7 +694,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { return 
nil } -func cmdKVStore(cmd *cobra.Command, args []string) error { +func cmdKVStore(*cobra.Command, []string) error { logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) // Create the application - in memory or persisted to disk @@ -734,7 +733,6 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { //-------------------------------------------------------------------------------- func printResponse(cmd *cobra.Command, args []string, rsps ...response) { - if flagVerbose { fmt.Println(">", cmd.Use, strings.Join(args, " ")) } @@ -745,7 +743,6 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { fmt.Printf("-> code: OK\n") } else { fmt.Printf("-> code: %d\n", rsp.Code) - } if len(rsp.Data) != 0 { diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md index e9e38b53c1e..55c13572366 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -8,9 +8,10 @@ The app has no replay protection (other than what the mempool provides). Validator set changes are effected using the following transaction format: ```md -"val:pubkey1!power1,pubkey2!power2,pubkey3!power3" +"val:pubkeytype1!pubkey1!power1,pubkeytype2!pubkey2!power2,pubkeytype3!pubkey3!power3" ``` -where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). +where `pubkeyN` is a base64-encoded 32-byte key, `pubkeytypeN` is a string representing the key type, +and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). To remove a validator from the validator set, set power to `0`. There is no Sybil protection against new validators joining. diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 094f3ae1908..8f9575191e0 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -14,7 +14,7 @@ import ( // RandVal creates one random validator, with a key derived // from the input value -func RandVal(i int) types.ValidatorUpdate { +func RandVal() types.ValidatorUpdate { pubkey := cmtrand.Bytes(32) power := cmtrand.Uint16() + 1 v := types.UpdateValidator(pubkey, int64(power), "") @@ -28,7 +28,7 @@ func RandVals(cnt int) []types.ValidatorUpdate { res := make([]types.ValidatorUpdate, cnt) for i := 0; i < cnt; i++ { - res[i] = RandVal(i) + res[i] = RandVal() } return res } @@ -75,5 +75,6 @@ func MakeValSetChangeTx(pubkey crypto.PublicKey, power int64) []byte { panic(err) } pubStr := base64.StdEncoding.EncodeToString(pk.Bytes()) - return []byte(fmt.Sprintf("%s%s!%d", ValidatorPrefix, pubStr, power)) + pubTypeStr := pk.Type() + return []byte(fmt.Sprintf("%s%s!%s!%d", ValidatorPrefix, pubTypeStr, pubStr, power)) } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 0f8794893b3..b403d0c9fa3 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -84,7 +84,7 @@ func (app *Application) SetGenBlockEvents() { // begins and lets the application know what Tendermint versions it's interacting with. Based on this information, // Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has.
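// (For instance, a fresh kvstore with no persisted state would answer with
// something like
//
//	&types.ResponseInfo{
//		LastBlockHeight:  0,
//		LastBlockAppHash: nil,
//	}
//
// and a zero LastBlockHeight is what makes CometBFT fall through to InitChain,
// as the next sentence describes.)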
If the // Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data -func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (app *Application) Info(context.Context, *types.RequestInfo) (*types.ResponseInfo, error) { // Tendermint expects the application to persist validators, on start-up we need to reload them to memory if they exist if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 { validators := app.getValidators() @@ -130,7 +130,8 @@ func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { // If it is a validator update transaction, check that it is correctly formatted if isValidatorTx(req.Tx) { - if _, _, err := parseValidatorTx(req.Tx); err != nil { + if _, _, _, err := parseValidatorTx(req.Tx); err != nil { + //nolint:nilerr return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil } } else if !isValidTx(req.Tx) { @@ -156,10 +157,10 @@ func isValidTx(tx []byte) bool { return false } -// PrepareProposal is called when the node is a proposer. Tendermint stages a set of transactions to the application. As the +// PrepareProposal is called when the node is a proposer. CometBFT stages a set of transactions to the application. As the // KVStore has two accepted formats, `:` and `=`, we modify all instances of `:` with `=` to make it consistent. Note: this is // quite a trivial example of transaction modification. -// NOTE: we assume that Tendermint will never provide more transactions than can fit in a block. +// NOTE: we assume that CometBFT will never provide more transactions than can fit in a block. func (app *Application) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { return &types.ResponsePrepareProposal{Txs: app.formatTxs(ctx, req.Txs)}, nil } @@ -217,11 +218,11 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal respTxs := make([]*types.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { if isValidatorTx(tx) { - pubKey, power, err := parseValidatorTx(tx) + keyType, pubKey, power, err := parseValidatorTx(tx) if err != nil { panic(err) } - app.valUpdates = append(app.valUpdates, types.UpdateValidator(pubKey, power, "")) + app.valUpdates = append(app.valUpdates, types.UpdateValidator(pubKey, power, keyType)) } else { app.stagedTxs = append(app.stagedTxs, tx) } @@ -324,8 +325,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal // Commit is called after FinalizeBlock and after Tendermint state which includes the updates to // AppHash, ConsensusParams and ValidatorSet has occurred. 
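// (The parsing flow above in miniature: given a transaction such as
// "val:ed25519!<base64 pubkey>!10", parseValidatorTx now also returns the key
// type, and FinalizeBlock threads it straight into the validator update:
//
//	keyType, pubKey, power, err := parseValidatorTx(tx)
//	if err != nil {
//		panic(err) // as FinalizeBlock does in this file
//	}
//	update := types.UpdateValidator(pubKey, power, keyType)
//
// "ed25519" here is only an example key type; any type string the application
// recognizes fits the new "pubkeytype!pubkey!power" layout.)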
// The KVStore persists the validator updates and the new key values -func (app *Application) Commit(_ context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { - +func (app *Application) Commit(context.Context, *types.RequestCommit) (*types.ResponseCommit, error) { // apply the validator updates to state (note this is really the validator set at h + 2) for _, valUpdate := range app.valUpdates { app.updateValidator(valUpdate) @@ -414,33 +414,33 @@ func isValidatorTx(tx []byte) bool { return strings.HasPrefix(string(tx), ValidatorPrefix) } -func parseValidatorTx(tx []byte) ([]byte, int64, error) { +func parseValidatorTx(tx []byte) (string, []byte, int64, error) { tx = tx[len(ValidatorPrefix):] // get the pubkey and power - pubKeyAndPower := strings.Split(string(tx), "!") - if len(pubKeyAndPower) != 2 { - return nil, 0, fmt.Errorf("expected 'pubkey!power'. Got %v", pubKeyAndPower) + typePubKeyAndPower := strings.Split(string(tx), "!") + if len(typePubKeyAndPower) != 3 { + return "", nil, 0, fmt.Errorf("expected 'pubkeytype!pubkey!power'. Got %v", typePubKeyAndPower) } - pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1] + keyType, pubkeyS, powerS := typePubKeyAndPower[0], typePubKeyAndPower[1], typePubKeyAndPower[2] // decode the pubkey pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { - return nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS) + return "", nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS) } // decode the power power, err := strconv.ParseInt(powerS, 10, 64) if err != nil { - return nil, 0, fmt.Errorf("power (%s) is not an int", powerS) + return "", nil, 0, fmt.Errorf("power (%s) is not an int", powerS) } if power < 0 { - return nil, 0, fmt.Errorf("power can not be less than 0, got %d", power) + return "", nil, 0, fmt.Errorf("power can not be less than 0, got %d", power) } - return pubkey, power, nil + return keyType, pubkey, power, nil } // add, update, or remove a validator diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 9aa4e6f2d3b..60ef73fe1b8 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -133,7 +133,6 @@ func TestPersistentKVStoreInfo(t *testing.T) { resInfo, err = kvstore.Info(ctx, &types.RequestInfo{}) require.NoError(t, err) require.Equal(t, height, resInfo.LastBlockHeight) - } // add a validator, remove a validator, update a validator @@ -200,7 +199,6 @@ func TestValUpdates(t *testing.T) { vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...) vals2 = kvstore.getValidators() valsEqual(t, vals1, vals2) - } func TestCheckTx(t *testing.T) { @@ -208,7 +206,7 @@ func TestCheckTx(t *testing.T) { defer cancel() kvstore := NewInMemoryApplication() - val := RandVal(1) + val := RandVal() testCases := []struct { expCode uint32 @@ -255,7 +253,8 @@ func makeApplyBlock( kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, - txs ...[]byte) { + txs ...[]byte, +) { // make and apply block height := int64(heightInt) hash := []byte("foo") @@ -270,7 +269,6 @@ func makeApplyBlock( require.NoError(t, err) valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates) - } // order doesn't matter diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 6c0344cf0bb..e0eaefa648e 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -37,7 +37,6 @@ func NewGRPCServer(protoAddr string, app types.Application) service.Service { // OnStart starts the gRPC service. 
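// (Usage sketch with an assumed listen address: the value returned by
// NewGRPCServer above satisfies service.Service, so a caller would typically
// do
//
//	srv := server.NewGRPCServer("tcp://127.0.0.1:26658", app)
//	if err := srv.Start(); err != nil {
//		// handle startup failure
//	}
//	defer srv.Stop() // shutdown error ignored in this sketch
//
// Start invokes the OnStart hook that follows.)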
func (s *GRPCServer) OnStart() error { - ln, err := net.Listen(s.proto, s.addr) if err != nil { return err @@ -72,6 +71,6 @@ func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*ty return &types.ResponseEcho{Message: req.Message}, nil } -func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) { +func (app *gRPCApplication) Flush(context.Context, *types.RequestFlush) (*types.ResponseFlush, error) { return &types.ResponseFlush{}, nil } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index fc745211d4d..9c33c8eb7d2 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -68,7 +68,7 @@ func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, return nil } -func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error { +func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, _ []byte) error { res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) for i, tx := range res.Txs { if !bytes.Equal(tx, txExpected[i]) { diff --git a/abci/types/application.go b/abci/types/application.go index 3d3a75b55a6..4ccfd229ebc 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -45,39 +45,39 @@ func NewBaseApplication() *BaseApplication { return &BaseApplication{} } -func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) { +func (BaseApplication) Info(context.Context, *RequestInfo) (*ResponseInfo, error) { return &ResponseInfo{}, nil } -func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { +func (BaseApplication) CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) { return &ResponseCheckTx{Code: CodeTypeOK}, nil } -func (BaseApplication) Commit(_ context.Context, req *RequestCommit) (*ResponseCommit, error) { +func (BaseApplication) Commit(context.Context, *RequestCommit) (*ResponseCommit, error) { return &ResponseCommit{}, nil } -func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) { +func (BaseApplication) Query(context.Context, *RequestQuery) (*ResponseQuery, error) { return &ResponseQuery{Code: CodeTypeOK}, nil } -func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) { +func (BaseApplication) InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) { return &ResponseInitChain{}, nil } -func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { +func (BaseApplication) ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) { return &ResponseListSnapshots{}, nil } -func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { +func (BaseApplication) OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { return &ResponseOfferSnapshot{}, nil } -func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { +func (BaseApplication) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { return &ResponseLoadSnapshotChunk{}, nil } -func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) 
(*ResponseApplySnapshotChunk, error) { +func (BaseApplication) ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { return &ResponseApplySnapshotChunk{}, nil } @@ -94,15 +94,15 @@ func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPreparePro return &ResponsePrepareProposal{Txs: txs}, nil } -func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { +func (BaseApplication) ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) { return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil } -func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { +func (BaseApplication) ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) { return &ResponseExtendVote{}, nil } -func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { +func (BaseApplication) VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { return &ResponseVerifyVoteExtension{ Status: ResponseVerifyVoteExtension_ACCEPT, }, nil diff --git a/abci/types/messages.go b/abci/types/messages.go index b081098d0bd..44d2f956838 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -2,6 +2,7 @@ package types import ( "io" + "math" "github.com/cosmos/gogoproto/proto" @@ -9,7 +10,7 @@ import ( ) const ( - maxMsgSize = 104857600 // 100MB + maxMsgSize = math.MaxInt32 // 2GB ) // WriteMessage writes a varint length-delimited protobuf message. diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index b7f0b51ded0..8eefa5568bf 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -18,6 +18,10 @@ type Application struct { func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + var r0 *types.ResponseApplySnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { @@ -44,6 +48,10 @@ func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.Reques func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *types.ResponseCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { @@ -70,6 +78,10 @@ func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) ( func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.ResponseCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) (*types.ResponseCommit, error)); ok { @@ -96,6 +108,10 @@ func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*t func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := 
_m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + var r0 *types.ResponseExtendVote var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { @@ -122,6 +138,10 @@ func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendV func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 *types.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { @@ -148,6 +168,10 @@ func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFina func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Info") + } + var r0 *types.ResponseInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { @@ -174,6 +198,10 @@ func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + var r0 *types.ResponseInitChain var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { @@ -200,6 +228,10 @@ func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChai func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + var r0 *types.ResponseListSnapshots var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { @@ -226,6 +258,10 @@ func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestList func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + var r0 *types.ResponseLoadSnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { @@ -252,6 +288,10 @@ func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.Request func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + var r0 *types.ResponseOfferSnapshot var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { @@ -278,6 +318,10 @@ func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOffe func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 
{ + panic("no return value specified for PrepareProposal") + } + var r0 *types.ResponsePrepareProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { @@ -304,6 +348,10 @@ func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPr func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 *types.ResponseProcessProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { @@ -330,6 +378,10 @@ func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestPr func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 *types.ResponseQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { @@ -356,6 +408,10 @@ func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*typ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 *types.ResponseVerifyVoteExtension var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { @@ -378,13 +434,12 @@ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.Reque return r0, r1 } -type mockConstructorTestingTNewApplication interface { +// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewApplication(t interface { mock.TestingT Cleanup(func()) -} - -// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewApplication(t mockConstructorTestingTNewApplication) *Application { +}) *Application { mock := &Application{} mock.Mock.Test(t) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 804b0a60fde..5b9860d6b7e 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1296,10 +1296,18 @@ func (m *RequestProcessProposal) GetProposerAddress() []byte { // Extends a vote with application-injected data type RequestExtendVote struct { - // the hash of the block that this vote may be referring to + // the hash of the block that this vote may be referring to Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` // the height of the extended vote Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + // info of the block that this vote may be referring to + Time time.Time `protobuf:"bytes,3,opt,name=time,proto3,stdtime" json:"time"` + Txs [][]byte `protobuf:"bytes,4,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,5,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,6,rep,name=misbehavior,proto3" json:"misbehavior"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the original proposer of the block. + ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` } func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } @@ -1349,6 +1357,48 @@ func (m *RequestExtendVote) GetHeight() int64 { return 0 } +func (m *RequestExtendVote) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestExtendVote) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestExtendVote) GetProposedLastCommit() CommitInfo { + if m != nil { + return m.ProposedLastCommit + } + return CommitInfo{} +} + +func (m *RequestExtendVote) GetMisbehavior() []Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestExtendVote) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestExtendVote) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + // Verify the vote extension type RequestVerifyVoteExtension struct { // the hash of the block that this received vote corresponds to @@ -3002,8 +3052,8 @@ type ExecTxResult struct { Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } @@ -3614,205 +3664,205 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", 
fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3164 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xbb, 0x73, 0x23, 0xc7, - 0xd1, 0xc7, 0xe2, 0x8d, 0xc6, 0x83, 0xcb, 0x21, 0xef, 0x84, 0x83, 0xee, 0x48, 0x6a, 0x55, 0x92, - 0x4e, 0x27, 0x89, 0xd4, 0xc7, 0xfb, 0x4e, 0x8f, 0x3a, 0xe9, 0xfb, 0x0a, 0xc4, 0xe1, 0x3e, 0x90, - 0x47, 0x91, 0xd4, 0x12, 0x3c, 0x95, 0x3e, 0xdb, 0x5a, 0x2d, 0x81, 0x01, 0xb0, 0x3a, 0x00, 0xbb, - 0xda, 0x1d, 0x50, 0xa0, 0x42, 0x3f, 0xaa, 0x5c, 0x8a, 0x54, 0x65, 0x07, 0x0a, 0xac, 0xc0, 0x81, - 0x13, 0xff, 0x05, 0x8e, 0xec, 0xc4, 0x81, 0x02, 0x07, 0x0a, 0x1d, 0xc9, 0x2e, 0x29, 0x53, 0xea, - 0xc0, 0x99, 0xcb, 0x35, 0x8f, 0x7d, 0x01, 0x58, 0x02, 0x3c, 0xa9, 0x5c, 0xe5, 0x2a, 0x67, 0x3b, - 0xbd, 0xdd, 0x3d, 0x33, 0xbd, 0x3d, 0xdd, 0xfd, 0xeb, 0x1d, 0x78, 0x92, 0xe0, 0x61, 0x1b, 0xdb, - 0x03, 0x63, 0x48, 0xb6, 0xf4, 0xd3, 0x96, 0xb1, 0x45, 0xce, 0x2d, 0xec, 0x6c, 0x5a, 0xb6, 0x49, - 0x4c, 0xb4, 0xe4, 0xbf, 0xdc, 0xa4, 0x2f, 0x2b, 0x37, 0x02, 0xdc, 0x2d, 0xfb, 0xdc, 0x22, 0xe6, - 0x96, 0x65, 0x9b, 0x66, 0x87, 0xf3, 0x57, 0xae, 0x4f, 0xbf, 0x7e, 0x84, 0xcf, 0x85, 0xb6, 0x90, - 0x30, 0x9b, 0x65, 0xcb, 0xd2, 0x6d, 0x7d, 0xe0, 0xbe, 0xde, 0x98, 0x7a, 0x7d, 0xa6, 0xf7, 0x8d, - 0xb6, 0x4e, 0x4c, 0x5b, 0x70, 0xac, 0x77, 0x4d, 0xb3, 0xdb, 0xc7, 0x5b, 0x6c, 0x74, 0x3a, 0xea, - 0x6c, 0x11, 0x63, 0x80, 0x1d, 0xa2, 0x0f, 0x2c, 0xc1, 0xb0, 0xda, 0x35, 0xbb, 0x26, 0x7b, 0xdc, - 0xa2, 0x4f, 0x9c, 0xaa, 0xfc, 0x21, 0x07, 0x19, 0x15, 0x7f, 0x38, 0xc2, 0x0e, 0x41, 0xdb, 0x90, - 0xc4, 0xad, 0x9e, 0x59, 0x96, 0x36, 0xa4, 0x9b, 0xf9, 0xed, 0xeb, 0x9b, 0x13, 0x1b, 0xdc, 0x14, - 0x7c, 0xf5, 0x56, 0xcf, 0x6c, 0xc4, 0x54, 0xc6, 0x8b, 0xee, 0x40, 0xaa, 0xd3, 0x1f, 0x39, 0xbd, - 0x72, 0x9c, 0x09, 0xdd, 0x88, 0x12, 0xba, 0x4f, 0x99, 0x1a, 0x31, 0x95, 0x73, 0xd3, 0xa9, 0x8c, - 0x61, 0xc7, 0x2c, 0x27, 0x2e, 0x9e, 0x6a, 0x77, 0xd8, 0x61, 0x53, 0x51, 0x5e, 0xb4, 0x03, 0x60, - 0x0c, 0x0d, 0xa2, 0xb5, 0x7a, 0xba, 0x31, 0x2c, 0xa7, 0x98, 0xe4, 0x53, 0xd1, 0x92, 0x06, 0xa9, - 0x51, 0xc6, 0x46, 0x4c, 0xcd, 0x19, 0xee, 0x80, 0x2e, 0xf7, 0xc3, 0x11, 0xb6, 0xcf, 0xcb, 0xe9, - 0x8b, 0x97, 0xfb, 0x36, 0x65, 0xa2, 0xcb, 0x65, 0xdc, 0xe8, 0x0d, 0xc8, 0xb6, 0x7a, 0xb8, 0xf5, - 0x48, 0x23, 0xe3, 0x72, 0x96, 0x49, 0xae, 0x47, 0x49, 0xd6, 0x28, 0x5f, 0x73, 0xdc, 0x88, 0xa9, - 0x99, 0x16, 0x7f, 0x44, 0xaf, 0x41, 0xba, 0x65, 0x0e, 0x06, 0x06, 0x29, 0xe7, 0x99, 0xec, 0x5a, - 0xa4, 0x2c, 0xe3, 0x6a, 0xc4, 0x54, 0xc1, 0x8f, 0x0e, 0xa0, 0xd4, 0x37, 0x1c, 0xa2, 0x39, 0x43, - 0xdd, 0x72, 0x7a, 0x26, 0x71, 0xca, 0x05, 0xa6, 0xe1, 0x99, 0x28, 0x0d, 0xfb, 0x86, 0x43, 0x8e, - 0x5d, 0xe6, 0x46, 0x4c, 0x2d, 0xf6, 0x83, 0x04, 0xaa, 0xcf, 0xec, 0x74, 0xb0, 0xed, 0x29, 0x2c, - 0x17, 0x2f, 0xd6, 0x77, 0x48, 0xb9, 0x5d, 0x79, 0xaa, 0xcf, 0x0c, 0x12, 0xd0, 0x0f, 0x60, 0xa5, - 0x6f, 0xea, 0x6d, 0x4f, 0x9d, 0xd6, 0xea, 0x8d, 0x86, 0x8f, 0xca, 0x25, 0xa6, 0xf4, 0xf9, 0xc8, - 0x45, 0x9a, 0x7a, 0xdb, 0x55, 0x51, 0xa3, 0x02, 0x8d, 0x98, 0xba, 0xdc, 0x9f, 0x24, 0xa2, 0xf7, - 0x60, 0x55, 0xb7, 0xac, 0xfe, 0xf9, 0xa4, 0xf6, 0x25, 0xa6, 0xfd, 0x56, 0x94, 0xf6, 0x2a, 0x95, - 0x99, 0x54, 0x8f, 0xf4, 0x29, 0x2a, 0x6a, 0x82, 0x6c, 0xd9, 0xd8, 0xd2, 0x6d, 0xac, 0x59, 0xb6, - 0x69, 0x99, 0x8e, 0xde, 0x2f, 0xcb, 0x4c, 0xf7, 0x73, 0x51, 0xba, 0x8f, 0x38, 0xff, 0x91, 0x60, - 0x6f, 0xc4, 0xd4, 0x25, 0x2b, 0x4c, 0xe2, 0x5a, 0xcd, 0x16, 0x76, 0x1c, 0x5f, 0xeb, 0xf2, 0x3c, - 0xad, 0x8c, 0x3f, 0xac, 0x35, 0x44, 0x42, 0x75, 0xc8, 0xe3, 0x31, 0x15, 0xd7, 0xce, 0x4c, 
0x82, - 0xcb, 0x88, 0x29, 0x54, 0x22, 0x4f, 0x28, 0x63, 0x7d, 0x68, 0x12, 0xdc, 0x88, 0xa9, 0x80, 0xbd, - 0x11, 0xd2, 0xe1, 0xca, 0x19, 0xb6, 0x8d, 0xce, 0x39, 0x53, 0xa3, 0xb1, 0x37, 0x8e, 0x61, 0x0e, - 0xcb, 0x2b, 0x4c, 0xe1, 0x0b, 0x51, 0x0a, 0x1f, 0x32, 0x21, 0xaa, 0xa2, 0xee, 0x8a, 0x34, 0x62, - 0xea, 0xca, 0xd9, 0x34, 0x99, 0xba, 0x58, 0xc7, 0x18, 0xea, 0x7d, 0xe3, 0x63, 0xac, 0x9d, 0xf6, - 0xcd, 0xd6, 0xa3, 0xf2, 0xea, 0xc5, 0x2e, 0x76, 0x5f, 0x70, 0xef, 0x50, 0x66, 0xea, 0x62, 0x9d, - 0x20, 0x61, 0x27, 0x03, 0xa9, 0x33, 0xbd, 0x3f, 0xc2, 0x7b, 0xc9, 0x6c, 0x52, 0x4e, 0xed, 0x25, - 0xb3, 0x19, 0x39, 0xbb, 0x97, 0xcc, 0xe6, 0x64, 0xd8, 0x4b, 0x66, 0x41, 0xce, 0x2b, 0xcf, 0x41, - 0x3e, 0x10, 0x98, 0x50, 0x19, 0x32, 0x03, 0xec, 0x38, 0x7a, 0x17, 0xb3, 0x38, 0x96, 0x53, 0xdd, - 0xa1, 0x52, 0x82, 0x42, 0x30, 0x18, 0x29, 0x9f, 0x4a, 0x9e, 0x24, 0x8d, 0x33, 0x54, 0xf2, 0x0c, - 0xdb, 0xcc, 0x1c, 0x42, 0x52, 0x0c, 0xd1, 0xd3, 0x50, 0x64, 0x5b, 0xd1, 0xdc, 0xf7, 0x34, 0xd8, - 0x25, 0xd5, 0x02, 0x23, 0x3e, 0x14, 0x4c, 0xeb, 0x90, 0xb7, 0xb6, 0x2d, 0x8f, 0x25, 0xc1, 0x58, - 0xc0, 0xda, 0xb6, 0x5c, 0x86, 0xa7, 0xa0, 0x40, 0xf7, 0xed, 0x71, 0x24, 0xd9, 0x24, 0x79, 0x4a, - 0x13, 0x2c, 0xca, 0x9f, 0xe2, 0x20, 0x4f, 0x06, 0x30, 0xf4, 0x1a, 0x24, 0x69, 0x2c, 0x17, 0x61, - 0xb9, 0xb2, 0xc9, 0x03, 0xfd, 0xa6, 0x1b, 0xe8, 0x37, 0x9b, 0x6e, 0xa0, 0xdf, 0xc9, 0x7e, 0xf1, - 0xd5, 0x7a, 0xec, 0xd3, 0xbf, 0xac, 0x4b, 0x2a, 0x93, 0x40, 0xd7, 0x68, 0xd8, 0xd2, 0x8d, 0xa1, - 0x66, 0xb4, 0xd9, 0x92, 0x73, 0x34, 0x26, 0xe9, 0xc6, 0x70, 0xb7, 0x8d, 0xf6, 0x41, 0x6e, 0x99, - 0x43, 0x07, 0x0f, 0x9d, 0x91, 0xa3, 0xf1, 0x54, 0x23, 0x82, 0x71, 0x28, 0xa4, 0xf2, 0x84, 0x57, - 0x73, 0x39, 0x8f, 0x18, 0xa3, 0xba, 0xd4, 0x0a, 0x13, 0xd0, 0x7d, 0x00, 0x2f, 0x1f, 0x39, 0xe5, - 0xe4, 0x46, 0xe2, 0x66, 0x7e, 0x7b, 0x63, 0xea, 0x83, 0x3f, 0x74, 0x59, 0x4e, 0xac, 0xb6, 0x4e, - 0xf0, 0x4e, 0x92, 0x2e, 0x57, 0x0d, 0x48, 0xa2, 0x67, 0x61, 0x49, 0xb7, 0x2c, 0xcd, 0x21, 0x3a, - 0xc1, 0xda, 0xe9, 0x39, 0xc1, 0x0e, 0x8b, 0xf3, 0x05, 0xb5, 0xa8, 0x5b, 0xd6, 0x31, 0xa5, 0xee, - 0x50, 0x22, 0x7a, 0x06, 0x4a, 0x34, 0xa6, 0x1b, 0x7a, 0x5f, 0xeb, 0x61, 0xa3, 0xdb, 0x23, 0x2c, - 0x9e, 0x27, 0xd4, 0xa2, 0xa0, 0x36, 0x18, 0x51, 0x69, 0x7b, 0x5f, 0x9c, 0xc5, 0x73, 0x84, 0x20, - 0xd9, 0xd6, 0x89, 0xce, 0x2c, 0x59, 0x50, 0xd9, 0x33, 0xa5, 0x59, 0x3a, 0xe9, 0x09, 0xfb, 0xb0, - 0x67, 0x74, 0x15, 0xd2, 0x42, 0x6d, 0x82, 0xa9, 0x15, 0x23, 0xb4, 0x0a, 0x29, 0xcb, 0x36, 0xcf, - 0x30, 0xfb, 0x74, 0x59, 0x95, 0x0f, 0x14, 0x15, 0x4a, 0xe1, 0xd8, 0x8f, 0x4a, 0x10, 0x27, 0x63, - 0x31, 0x4b, 0x9c, 0x8c, 0xd1, 0xcb, 0x90, 0xa4, 0x86, 0x64, 0x73, 0x94, 0x66, 0x64, 0x3b, 0x21, - 0xd7, 0x3c, 0xb7, 0xb0, 0xca, 0x38, 0x95, 0x25, 0x28, 0x86, 0x72, 0x82, 0x72, 0x15, 0x56, 0x67, - 0x85, 0x78, 0xa5, 0xe7, 0xd1, 0x43, 0xa1, 0x1a, 0xdd, 0x81, 0xac, 0x17, 0xe3, 0xb9, 0xe3, 0x5c, - 0x9b, 0x9a, 0xd6, 0x65, 0x56, 0x3d, 0x56, 0xea, 0x31, 0xf4, 0x03, 0xf4, 0x74, 0x91, 0xd1, 0x0b, - 0x6a, 0x46, 0xb7, 0xac, 0x86, 0xee, 0xf4, 0x94, 0xf7, 0xa1, 0x1c, 0x15, 0xbf, 0x03, 0x06, 0x93, - 0x98, 0xdb, 0xbb, 0x06, 0xbb, 0x0a, 0xe9, 0x8e, 0x69, 0x0f, 0x74, 0xc2, 0x94, 0x15, 0x55, 0x31, - 0xa2, 0x86, 0xe4, 0xb1, 0x3c, 0xc1, 0xc8, 0x7c, 0xa0, 0x68, 0x70, 0x2d, 0x32, 0x86, 0x53, 0x11, - 0x63, 0xd8, 0xc6, 0xdc, 0xac, 0x45, 0x95, 0x0f, 0x7c, 0x45, 0x7c, 0xb1, 0x7c, 0x40, 0xa7, 0x75, - 0xd8, 0x5e, 0x99, 0xfe, 0x9c, 0x2a, 0x46, 0xca, 0x67, 0x09, 0xb8, 0x3a, 0x3b, 0x92, 0xa3, 0x0d, - 0x28, 0x0c, 0xf4, 0xb1, 0x46, 0xc6, 0xc2, 0xed, 0x24, 0xf6, 0xe1, 0x61, 0xa0, 0x8f, 0x9b, 0x63, - 0xee, 0x73, 0x32, 
0x24, 0xc8, 0xd8, 0x29, 0xc7, 0x37, 0x12, 0x37, 0x0b, 0x2a, 0x7d, 0x44, 0x27, - 0xb0, 0xdc, 0x37, 0x5b, 0x7a, 0x5f, 0xeb, 0xeb, 0x0e, 0xd1, 0x44, 0x8a, 0xe7, 0x87, 0xe8, 0xe9, - 0x29, 0x63, 0xf3, 0x98, 0x8c, 0xdb, 0xfc, 0x7b, 0xd2, 0x80, 0x23, 0xfc, 0x7f, 0x89, 0xe9, 0xd8, - 0xd7, 0xdd, 0x4f, 0x8d, 0xee, 0x41, 0x7e, 0x60, 0x38, 0xa7, 0xb8, 0xa7, 0x9f, 0x19, 0xa6, 0x2d, - 0x4e, 0xd3, 0xb4, 0xd3, 0xbc, 0xe5, 0xf3, 0x08, 0x4d, 0x41, 0xb1, 0xc0, 0x27, 0x49, 0x85, 0x7c, - 0xd8, 0x8d, 0x26, 0xe9, 0x4b, 0x47, 0x93, 0x97, 0x61, 0x75, 0x88, 0xc7, 0x44, 0xf3, 0xcf, 0x2b, - 0xf7, 0x93, 0x0c, 0x33, 0x3d, 0xa2, 0xef, 0xbc, 0x13, 0xee, 0x50, 0x97, 0x41, 0xcf, 0xb3, 0x5c, - 0x68, 0x99, 0x0e, 0xb6, 0x35, 0xbd, 0xdd, 0xb6, 0xb1, 0xe3, 0xb0, 0xf2, 0xa9, 0xc0, 0x12, 0x1c, - 0xa3, 0x57, 0x39, 0x59, 0xf9, 0x79, 0xf0, 0xd3, 0x84, 0x73, 0x9f, 0x30, 0xbc, 0xe4, 0x1b, 0xfe, - 0x18, 0x56, 0x85, 0x7c, 0x3b, 0x64, 0x7b, 0x5e, 0x83, 0x3e, 0x39, 0x7d, 0xbe, 0x26, 0x6d, 0x8e, - 0x5c, 0xf1, 0x68, 0xb3, 0x27, 0x1e, 0xcf, 0xec, 0x08, 0x92, 0xcc, 0x28, 0x49, 0x1e, 0x62, 0xe8, - 0xf3, 0xbf, 0xdb, 0xa7, 0xf8, 0x5f, 0x58, 0x9e, 0xaa, 0x23, 0xbc, 0x7d, 0x49, 0x33, 0xf7, 0x15, - 0x0f, 0xee, 0x4b, 0xf9, 0x95, 0x04, 0x95, 0xe8, 0xc2, 0x61, 0xa6, 0xaa, 0x17, 0x60, 0xd9, 0xdb, - 0x8b, 0xb7, 0x3e, 0x7e, 0xa6, 0x65, 0xef, 0x85, 0x58, 0x60, 0x64, 0x78, 0x7e, 0x06, 0x4a, 0x13, - 0x65, 0x0d, 0xff, 0x0a, 0xc5, 0xb3, 0xe0, 0xfc, 0xca, 0x4f, 0x13, 0x5e, 0xcc, 0x0c, 0xd5, 0x1e, - 0x33, 0x1c, 0xed, 0x6d, 0x58, 0x69, 0xe3, 0x96, 0xd1, 0x7e, 0x5c, 0x3f, 0x5b, 0x16, 0xd2, 0xff, - 0x71, 0xb3, 0x69, 0x37, 0xfb, 0x25, 0x40, 0x56, 0xc5, 0x8e, 0x45, 0x4b, 0x09, 0xb4, 0x03, 0x39, - 0x3c, 0x6e, 0x61, 0x8b, 0xb8, 0xd5, 0xd7, 0xec, 0xea, 0x96, 0x73, 0xd7, 0x5d, 0x4e, 0x8a, 0xed, - 0x3c, 0x31, 0x74, 0x5b, 0xc0, 0xd7, 0x68, 0x24, 0x2a, 0xc4, 0x83, 0xf8, 0xf5, 0x15, 0x17, 0xbf, - 0x26, 0x22, 0xa1, 0x19, 0x97, 0x9a, 0x00, 0xb0, 0xb7, 0x05, 0x80, 0x4d, 0xce, 0x99, 0x2c, 0x84, - 0x60, 0x6b, 0x21, 0x04, 0x9b, 0x9e, 0xb3, 0xcd, 0x08, 0x08, 0xfb, 0x8a, 0x0b, 0x61, 0x33, 0x73, - 0x56, 0x3c, 0x81, 0x61, 0xdf, 0x0c, 0x60, 0xd8, 0x1c, 0x13, 0xdd, 0x88, 0x14, 0x9d, 0x01, 0x62, - 0x5f, 0xf7, 0x40, 0x6c, 0x21, 0x12, 0x00, 0x0b, 0xe1, 0x49, 0x14, 0x7b, 0x38, 0x85, 0x62, 0x39, - 0xea, 0x7c, 0x36, 0x52, 0xc5, 0x1c, 0x18, 0x7b, 0x38, 0x05, 0x63, 0x4b, 0x73, 0x14, 0xce, 0xc1, - 0xb1, 0x3f, 0x9c, 0x8d, 0x63, 0xa3, 0x91, 0xa6, 0x58, 0xe6, 0x62, 0x40, 0x56, 0x8b, 0x00, 0xb2, - 0x72, 0x24, 0xe8, 0xe2, 0xea, 0x17, 0x46, 0xb2, 0x27, 0x33, 0x90, 0x2c, 0xc7, 0x9c, 0x37, 0x23, - 0x95, 0x2f, 0x00, 0x65, 0x4f, 0x66, 0x40, 0x59, 0x34, 0x57, 0xed, 0x5c, 0x2c, 0x7b, 0x3f, 0x8c, - 0x65, 0x57, 0x22, 0x0a, 0x26, 0xff, 0xb4, 0x47, 0x80, 0xd9, 0xd3, 0x28, 0x30, 0xcb, 0x01, 0xe7, - 0x8b, 0x91, 0x1a, 0x2f, 0x81, 0x66, 0x0f, 0xa7, 0xd0, 0xec, 0x95, 0x39, 0x9e, 0xb6, 0x38, 0x9c, - 0x4d, 0xc9, 0xe9, 0xbd, 0x64, 0x36, 0x2b, 0xe7, 0x38, 0x90, 0xdd, 0x4b, 0x66, 0xf3, 0x72, 0x41, - 0x79, 0x9e, 0x66, 0xdf, 0x89, 0x38, 0x47, 0xcb, 0x5c, 0x6c, 0xdb, 0xa6, 0x2d, 0x80, 0x29, 0x1f, - 0x28, 0x37, 0x29, 0xbc, 0xf1, 0x63, 0xda, 0x05, 0xd0, 0x97, 0xc1, 0x89, 0x40, 0x1c, 0x53, 0x7e, - 0x27, 0xf9, 0xb2, 0x0c, 0xfc, 0x06, 0xa1, 0x51, 0x4e, 0x40, 0xa3, 0x00, 0x20, 0x8e, 0x87, 0x01, - 0xf1, 0x3a, 0xe4, 0x29, 0x4c, 0x98, 0xc0, 0xba, 0xba, 0xe5, 0x61, 0xdd, 0x5b, 0xb0, 0xcc, 0x12, - 0x26, 0x87, 0xcd, 0x22, 0x2d, 0x25, 0x59, 0x5a, 0x5a, 0xa2, 0x2f, 0xb8, 0x75, 0x78, 0x7e, 0x7a, - 0x09, 0x56, 0x02, 0xbc, 0x1e, 0xfc, 0xe0, 0xc0, 0x4f, 0xf6, 0xb8, 0xab, 0x02, 0x87, 0xfc, 0x51, - 0xf2, 0x2d, 0xe4, 0x83, 0xe4, 0x59, 0x78, 
0x56, 0xfa, 0x9e, 0xf0, 0x6c, 0xfc, 0xb1, 0xf1, 0x6c, - 0x10, 0x4e, 0x25, 0xc2, 0x70, 0xea, 0xef, 0x92, 0xff, 0x4d, 0x3c, 0x74, 0xda, 0x32, 0xdb, 0x58, - 0x00, 0x1c, 0xf6, 0x4c, 0x4b, 0x92, 0xbe, 0xd9, 0x15, 0x30, 0x86, 0x3e, 0x52, 0x2e, 0x2f, 0xf1, - 0xe4, 0x44, 0x5e, 0xf1, 0xb0, 0x11, 0x4f, 0xfc, 0x02, 0x1b, 0xc9, 0x90, 0x78, 0x84, 0x79, 0xa7, - 0xb3, 0xa0, 0xd2, 0x47, 0xca, 0xc7, 0x9c, 0x4f, 0x24, 0x70, 0x3e, 0x40, 0xaf, 0x41, 0x8e, 0xf5, - 0xa9, 0x35, 0xd3, 0x72, 0x44, 0x77, 0x33, 0x54, 0xda, 0xf0, 0x66, 0xf5, 0xe6, 0x11, 0xe5, 0x39, - 0xb4, 0x1c, 0x35, 0x6b, 0x89, 0xa7, 0x40, 0xc5, 0x91, 0x0b, 0x55, 0x1c, 0xd7, 0x21, 0x47, 0x57, - 0xef, 0x58, 0x7a, 0x0b, 0x97, 0x81, 0x2d, 0xd4, 0x27, 0x28, 0xbf, 0x8d, 0xc3, 0xd2, 0x44, 0xa2, - 0x99, 0xb9, 0x77, 0xd7, 0x25, 0xe3, 0x01, 0xb4, 0xbe, 0x98, 0x3d, 0xd6, 0x00, 0xba, 0xba, 0xa3, - 0x7d, 0xa4, 0x0f, 0x09, 0x6e, 0x0b, 0xa3, 0x04, 0x28, 0xa8, 0x02, 0x59, 0x3a, 0x1a, 0x39, 0xb8, - 0x2d, 0x1a, 0x07, 0xde, 0x18, 0x35, 0x20, 0x8d, 0xcf, 0xf0, 0x90, 0x38, 0xe5, 0x0c, 0xfb, 0xec, - 0x57, 0xa7, 0x91, 0x1c, 0x7d, 0xbd, 0x53, 0xa6, 0x1f, 0xfb, 0xdb, 0xaf, 0xd6, 0x65, 0xce, 0xfd, - 0xa2, 0x39, 0x30, 0x08, 0x1e, 0x58, 0xe4, 0x5c, 0x15, 0xf2, 0x61, 0x2b, 0x64, 0x27, 0xac, 0xc0, - 0x5a, 0x58, 0x05, 0x17, 0x99, 0x52, 0x9b, 0x1a, 0xa6, 0x6d, 0x90, 0x73, 0xb5, 0x38, 0xc0, 0x03, - 0xcb, 0x34, 0xfb, 0x1a, 0x3f, 0xe3, 0x55, 0x28, 0x85, 0xf3, 0x2a, 0x7a, 0x1a, 0x8a, 0x36, 0x26, - 0xba, 0x31, 0xd4, 0x42, 0x45, 0x70, 0x81, 0x13, 0xf9, 0x99, 0xda, 0x4b, 0x66, 0x25, 0x39, 0xbe, - 0x97, 0xcc, 0xc6, 0xe5, 0x84, 0x72, 0x04, 0x57, 0x66, 0xe6, 0x55, 0xf4, 0x2a, 0xe4, 0xfc, 0x94, - 0x2c, 0xb1, 0xdd, 0x5e, 0xd0, 0x24, 0xf0, 0x79, 0x95, 0xdf, 0x4b, 0xbe, 0xca, 0x70, 0xdb, 0xa1, - 0x0e, 0x69, 0x1b, 0x3b, 0xa3, 0x3e, 0x6f, 0x04, 0x94, 0xb6, 0x5f, 0x5a, 0x2c, 0x23, 0x53, 0xea, - 0xa8, 0x4f, 0x54, 0x21, 0xac, 0xbc, 0x07, 0x69, 0x4e, 0x41, 0x79, 0xc8, 0x9c, 0x1c, 0x3c, 0x38, - 0x38, 0x7c, 0xe7, 0x40, 0x8e, 0x21, 0x80, 0x74, 0xb5, 0x56, 0xab, 0x1f, 0x35, 0x65, 0x09, 0xe5, - 0x20, 0x55, 0xdd, 0x39, 0x54, 0x9b, 0x72, 0x9c, 0x92, 0xd5, 0xfa, 0x5e, 0xbd, 0xd6, 0x94, 0x13, - 0x68, 0x19, 0x8a, 0xfc, 0x59, 0xbb, 0x7f, 0xa8, 0xbe, 0x55, 0x6d, 0xca, 0xc9, 0x00, 0xe9, 0xb8, - 0x7e, 0x70, 0xaf, 0xae, 0xca, 0x29, 0xe5, 0xbf, 0xe0, 0x5a, 0x64, 0x0e, 0xf7, 0x7b, 0x0a, 0x52, - 0xa0, 0xa7, 0xa0, 0x7c, 0x16, 0xa7, 0xa0, 0x26, 0x2a, 0x31, 0xa3, 0xbd, 0x89, 0x8d, 0x6f, 0x5f, - 0x22, 0xab, 0x4f, 0xec, 0x9e, 0xe2, 0x18, 0x1b, 0x77, 0x30, 0x69, 0xf5, 0x78, 0xa1, 0xc0, 0x23, - 0x50, 0x51, 0x2d, 0x0a, 0x2a, 0x13, 0x72, 0x38, 0xdb, 0x07, 0xb8, 0x45, 0x34, 0xee, 0x44, 0x0e, - 0x03, 0x13, 0x39, 0xca, 0x46, 0xa9, 0xc7, 0x9c, 0xa8, 0xbc, 0x7f, 0x29, 0x5b, 0xe6, 0x20, 0xa5, - 0xd6, 0x9b, 0xea, 0xbb, 0x72, 0x02, 0x21, 0x28, 0xb1, 0x47, 0xed, 0xf8, 0xa0, 0x7a, 0x74, 0xdc, - 0x38, 0xa4, 0xb6, 0x5c, 0x81, 0x25, 0xd7, 0x96, 0x2e, 0x31, 0xa5, 0xbc, 0x00, 0x4f, 0x44, 0x54, - 0x15, 0xd3, 0x90, 0x4a, 0xf9, 0xb5, 0x14, 0xe4, 0x0e, 0x57, 0x06, 0x87, 0x90, 0x76, 0x88, 0x4e, - 0x46, 0x8e, 0x30, 0xe2, 0xab, 0x8b, 0x96, 0x19, 0x9b, 0xee, 0xc3, 0x31, 0x13, 0x57, 0x85, 0x1a, - 0xe5, 0x0e, 0x94, 0xc2, 0x6f, 0xa2, 0x6d, 0xe0, 0x3b, 0x51, 0x5c, 0xb9, 0x0b, 0x68, 0xba, 0xfa, - 0x98, 0x01, 0x2f, 0xa5, 0x59, 0xf0, 0xf2, 0x37, 0x12, 0x3c, 0x79, 0x41, 0xa5, 0x81, 0xde, 0x9e, - 0xd8, 0xe4, 0xeb, 0x97, 0xa9, 0x53, 0x36, 0x39, 0x6d, 0x62, 0x9b, 0xb7, 0xa1, 0x10, 0xa4, 0x2f, - 0xb6, 0xc9, 0x6f, 0xe3, 0xfe, 0x21, 0x0e, 0xe3, 0x60, 0x3f, 0x04, 0x4a, 0xdf, 0x31, 0x04, 0xbe, - 0x01, 0x40, 0xc6, 0x1a, 0x77, 0x6b, 0x37, 0x8f, 0xde, 0x98, 0xd1, 
0x1a, 0xc3, 0xad, 0xe6, 0x58, - 0x1c, 0x82, 0x1c, 0x11, 0x4f, 0x0e, 0x3a, 0x0e, 0x36, 0x05, 0x46, 0x2c, 0xc7, 0x3a, 0x02, 0x30, - 0x2f, 0x9a, 0x8c, 0xfd, 0xe6, 0x01, 0x27, 0x3b, 0xe8, 0x5d, 0x78, 0x62, 0xa2, 0x50, 0xf0, 0x54, - 0x27, 0x17, 0xad, 0x17, 0xae, 0x84, 0xeb, 0x05, 0x57, 0x75, 0x30, 0xdb, 0xa7, 0xc2, 0xd9, 0xfe, - 0x5d, 0x00, 0xbf, 0x39, 0x40, 0x23, 0x8c, 0x6d, 0x8e, 0x86, 0x6d, 0xe6, 0x01, 0x29, 0x95, 0x0f, - 0xd0, 0x1d, 0x48, 0x51, 0x4f, 0x72, 0xed, 0x34, 0x1d, 0x8a, 0xa9, 0x27, 0x04, 0x9a, 0x0b, 0x9c, - 0x5b, 0x31, 0x00, 0x4d, 0xf7, 0x16, 0x23, 0xa6, 0x78, 0x33, 0x3c, 0xc5, 0x53, 0x91, 0x5d, 0xca, - 0xd9, 0x53, 0x7d, 0x0c, 0x29, 0xf6, 0xe5, 0x69, 0xd2, 0x65, 0x0d, 0x6d, 0x51, 0x2d, 0xd2, 0x67, - 0xf4, 0x23, 0x00, 0x9d, 0x10, 0xdb, 0x38, 0x1d, 0xf9, 0x13, 0xac, 0xcf, 0xf6, 0x9c, 0xaa, 0xcb, - 0xb7, 0x73, 0x5d, 0xb8, 0xd0, 0xaa, 0x2f, 0x1a, 0x70, 0xa3, 0x80, 0x42, 0xe5, 0x00, 0x4a, 0x61, - 0x59, 0xb7, 0xbe, 0xe1, 0x6b, 0x08, 0xd7, 0x37, 0xbc, 0x5c, 0x15, 0xf5, 0x8d, 0x57, 0x1d, 0x25, - 0x78, 0xd7, 0x9e, 0x0d, 0x94, 0x7f, 0x48, 0x50, 0x08, 0x3a, 0xde, 0xf7, 0x5c, 0x82, 0xdc, 0x98, - 0x51, 0x82, 0xe4, 0xba, 0xba, 0xf3, 0x0e, 0xaf, 0x40, 0xae, 0x4d, 0x55, 0x20, 0x99, 0xae, 0xee, - 0x9c, 0xfc, 0x0b, 0x0b, 0x10, 0xe5, 0x67, 0x12, 0x64, 0xbd, 0xcd, 0x87, 0x1b, 0xf8, 0xa1, 0x3f, - 0x1e, 0xdc, 0x76, 0xf1, 0x60, 0xd7, 0x9d, 0xff, 0xdf, 0x48, 0x78, 0xff, 0x37, 0xee, 0x7a, 0xc9, - 0x2f, 0xaa, 0x1d, 0x12, 0xb4, 0xb4, 0xf0, 0x29, 0x37, 0xd7, 0xdf, 0x85, 0x9c, 0x77, 0x76, 0x29, - 0xe4, 0x70, 0xdb, 0x46, 0x92, 0x38, 0x41, 0xa2, 0xe9, 0xb7, 0x0a, 0x29, 0xcb, 0xfc, 0x48, 0xb4, - 0xf4, 0x13, 0x2a, 0x1f, 0x28, 0x6d, 0x58, 0x9a, 0x38, 0xf8, 0xe8, 0x2e, 0x64, 0xac, 0xd1, 0xa9, - 0xe6, 0xba, 0xc6, 0x44, 0x73, 0xcd, 0x2d, 0x66, 0x47, 0xa7, 0x7d, 0xa3, 0xf5, 0x00, 0x9f, 0xbb, - 0x8b, 0xb1, 0x46, 0xa7, 0x0f, 0xb8, 0x07, 0xf1, 0x59, 0xe2, 0xc1, 0x59, 0x7e, 0x21, 0x41, 0xd6, - 0x3d, 0x11, 0xe8, 0x7f, 0x20, 0xe7, 0x05, 0x15, 0xef, 0x9f, 0x5c, 0x64, 0x34, 0x12, 0xfa, 0x7d, - 0x11, 0x54, 0x75, 0x7f, 0x26, 0x1a, 0x6d, 0xad, 0xd3, 0xd7, 0xb9, 0x27, 0x95, 0xc2, 0x36, 0xe3, - 0x61, 0x87, 0x45, 0xe3, 0xdd, 0x7b, 0xf7, 0xfb, 0x7a, 0x57, 0xcd, 0x33, 0x99, 0xdd, 0x36, 0x1d, - 0x88, 0xba, 0xee, 0x6f, 0x12, 0xc8, 0x93, 0xe7, 0xf5, 0x3b, 0xaf, 0x6e, 0x3a, 0xc9, 0x25, 0x66, - 0x24, 0x39, 0xb4, 0x05, 0x2b, 0x1e, 0x87, 0xe6, 0x18, 0xdd, 0xa1, 0x4e, 0x46, 0x36, 0x16, 0xed, - 0x48, 0xe4, 0xbd, 0x3a, 0x76, 0xdf, 0x4c, 0xef, 0x3a, 0xf5, 0x98, 0xbb, 0xfe, 0x49, 0x1c, 0xf2, - 0x81, 0xe6, 0x28, 0xfa, 0xef, 0x40, 0x28, 0x2a, 0xcd, 0xc8, 0x0b, 0x01, 0x5e, 0xff, 0xff, 0x5a, - 0xd8, 0x4c, 0xf1, 0xcb, 0x9b, 0x29, 0xaa, 0x05, 0xed, 0xf6, 0x5a, 0x93, 0x97, 0xee, 0xb5, 0xbe, - 0x08, 0x88, 0x98, 0x44, 0xef, 0x6b, 0x67, 0x26, 0x31, 0x86, 0x5d, 0x8d, 0xbb, 0x21, 0x0f, 0x1c, - 0x32, 0x7b, 0xf3, 0x90, 0xbd, 0x38, 0x62, 0x1e, 0xf9, 0x63, 0x09, 0xb2, 0x5e, 0xd1, 0x7d, 0xd9, - 0xbf, 0x6f, 0x57, 0x21, 0x2d, 0xea, 0x4a, 0xfe, 0xfb, 0x4d, 0x8c, 0x66, 0x36, 0x95, 0x2b, 0x90, - 0x1d, 0x60, 0xa2, 0xb3, 0x28, 0xc8, 0x73, 0x9a, 0x37, 0xbe, 0xf5, 0x3a, 0xe4, 0x03, 0x7f, 0x2e, - 0x69, 0x60, 0x3c, 0xa8, 0xbf, 0x23, 0xc7, 0x2a, 0x99, 0x4f, 0x3e, 0xdf, 0x48, 0x1c, 0xe0, 0x8f, - 0xe8, 0x69, 0x56, 0xeb, 0xb5, 0x46, 0xbd, 0xf6, 0x40, 0x96, 0x2a, 0xf9, 0x4f, 0x3e, 0xdf, 0xc8, - 0xa8, 0x98, 0xf5, 0x13, 0x6f, 0x3d, 0x80, 0xa5, 0x89, 0x0f, 0x13, 0x2e, 0x5a, 0x10, 0x94, 0xee, - 0x9d, 0x1c, 0xed, 0xef, 0xd6, 0xaa, 0xcd, 0xba, 0xf6, 0xf0, 0xb0, 0x59, 0x97, 0x25, 0xf4, 0x04, - 0xac, 0xec, 0xef, 0xfe, 0x5f, 0xa3, 0xa9, 0xd5, 0xf6, 0x77, 0xeb, 0x07, 0x4d, 0xad, 0xda, 
0x6c, - 0x56, 0x6b, 0x0f, 0xe4, 0xf8, 0xf6, 0xe7, 0x79, 0x48, 0x56, 0x77, 0x6a, 0xbb, 0xa8, 0x06, 0x49, - 0xd6, 0x08, 0xb9, 0xf0, 0xea, 0x52, 0xe5, 0xe2, 0xce, 0x30, 0xba, 0x0f, 0x29, 0xd6, 0x23, 0x41, - 0x17, 0xdf, 0x65, 0xaa, 0xcc, 0x69, 0x15, 0xd3, 0xc5, 0xb0, 0x13, 0x79, 0xe1, 0xe5, 0xa6, 0xca, - 0xc5, 0x9d, 0x63, 0xb4, 0x0f, 0x19, 0x17, 0x22, 0xcf, 0xbb, 0x71, 0x54, 0x99, 0xdb, 0xce, 0xa5, - 0x5b, 0xe3, 0xad, 0x86, 0x8b, 0xef, 0x3d, 0x55, 0xe6, 0xf4, 0x94, 0xd1, 0x2e, 0xa4, 0x05, 0x18, - 0x9d, 0x73, 0x95, 0xa9, 0x32, 0xaf, 0x4b, 0x8c, 0x54, 0xc8, 0xf9, 0x4d, 0x9c, 0xf9, 0xb7, 0xb9, - 0x2a, 0x0b, 0xb4, 0xcb, 0xd1, 0x7b, 0x50, 0x0c, 0x03, 0xdd, 0xc5, 0xae, 0x4b, 0x55, 0x16, 0xec, - 0x47, 0x53, 0xfd, 0x61, 0xd4, 0xbb, 0xd8, 0xf5, 0xa9, 0xca, 0x82, 0xed, 0x69, 0xf4, 0x01, 0x2c, - 0x4f, 0xa3, 0xd2, 0xc5, 0x6f, 0x53, 0x55, 0x2e, 0xd1, 0xb0, 0x46, 0x03, 0x40, 0x33, 0xd0, 0xec, - 0x25, 0x2e, 0x57, 0x55, 0x2e, 0xd3, 0xbf, 0x46, 0x6d, 0x58, 0x9a, 0x84, 0x88, 0x8b, 0x5e, 0xb6, - 0xaa, 0x2c, 0xdc, 0xcb, 0xe6, 0xb3, 0x84, 0xa1, 0xe5, 0xa2, 0x97, 0xaf, 0x2a, 0x0b, 0xb7, 0xb6, - 0xd1, 0x09, 0x40, 0x00, 0x1d, 0x2e, 0x70, 0x19, 0xab, 0xb2, 0x48, 0x93, 0x1b, 0x59, 0xb0, 0x32, - 0x0b, 0x36, 0x5e, 0xe6, 0x6e, 0x56, 0xe5, 0x52, 0xbd, 0x6f, 0xea, 0xcf, 0x61, 0x00, 0xb8, 0xd8, - 0x5d, 0xad, 0xca, 0x82, 0x4d, 0xf0, 0x9d, 0xea, 0x17, 0x5f, 0xaf, 0x49, 0x5f, 0x7e, 0xbd, 0x26, - 0xfd, 0xf5, 0xeb, 0x35, 0xe9, 0xd3, 0x6f, 0xd6, 0x62, 0x5f, 0x7e, 0xb3, 0x16, 0xfb, 0xf3, 0x37, - 0x6b, 0xb1, 0xff, 0x7f, 0xae, 0x6b, 0x90, 0xde, 0xe8, 0x74, 0xb3, 0x65, 0x0e, 0xb6, 0x5a, 0xe6, - 0x00, 0x93, 0xd3, 0x0e, 0xf1, 0x1f, 0xfc, 0x2b, 0xb7, 0xa7, 0x69, 0x96, 0x41, 0x6f, 0xff, 0x33, - 0x00, 0x00, 0xff, 0xff, 0x55, 0x7b, 0x1c, 0x1f, 0x92, 0x2b, 0x00, 0x00, + // 3167 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x73, 0x23, 0xd5, + 0xd5, 0x57, 0xeb, 0xad, 0xa3, 0x87, 0xdb, 0xd7, 0x9e, 0x41, 0x23, 0x06, 0xdb, 0x34, 0x05, 0x0c, + 0x03, 0xd8, 0x7c, 0x9e, 0x6f, 0x78, 0xd4, 0xc0, 0x57, 0x25, 0x6b, 0x34, 0x9f, 0xec, 0x31, 0xb6, + 0x69, 0xcb, 0x43, 0x91, 0x07, 0x4d, 0x5b, 0xba, 0xb2, 0x9a, 0x91, 0xd4, 0x4d, 0xf7, 0x95, 0x91, + 0x59, 0xa5, 0x42, 0x52, 0x95, 0x62, 0x45, 0x55, 0xb2, 0x60, 0x11, 0x16, 0x59, 0x64, 0x93, 0xbf, + 0x20, 0xab, 0x64, 0x93, 0x05, 0x8b, 0x2c, 0x58, 0x66, 0x45, 0x52, 0xb0, 0x63, 0x9b, 0x45, 0xb6, + 0xa9, 0xfb, 0xe8, 0x97, 0xa4, 0xb6, 0xa4, 0x81, 0x2c, 0x52, 0xc9, 0xae, 0xef, 0xe9, 0x73, 0xce, + 0xed, 0x7b, 0xee, 0xb9, 0xe7, 0xf1, 0xeb, 0x0b, 0x8f, 0x13, 0x3c, 0x68, 0x63, 0xbb, 0x6f, 0x0c, + 0xc8, 0x96, 0x7e, 0xda, 0x32, 0xb6, 0xc8, 0x85, 0x85, 0x9d, 0x4d, 0xcb, 0x36, 0x89, 0x89, 0x96, + 0xfc, 0x97, 0x9b, 0xf4, 0x65, 0xe5, 0x89, 0x00, 0x77, 0xcb, 0xbe, 0xb0, 0x88, 0xb9, 0x65, 0xd9, + 0xa6, 0xd9, 0xe1, 0xfc, 0x95, 0xeb, 0x93, 0xaf, 0x1f, 0xe2, 0x0b, 0xa1, 0x2d, 0x24, 0xcc, 0x66, + 0xd9, 0xb2, 0x74, 0x5b, 0xef, 0xbb, 0xaf, 0x37, 0x26, 0x5e, 0x9f, 0xeb, 0x3d, 0xa3, 0xad, 0x13, + 0xd3, 0x16, 0x1c, 0xeb, 0x67, 0xa6, 0x79, 0xd6, 0xc3, 0x5b, 0x6c, 0x74, 0x3a, 0xec, 0x6c, 0x11, + 0xa3, 0x8f, 0x1d, 0xa2, 0xf7, 0x2d, 0xc1, 0xb0, 0x7a, 0x66, 0x9e, 0x99, 0xec, 0x71, 0x8b, 0x3e, + 0x71, 0xaa, 0xf2, 0xc7, 0x1c, 0x64, 0x54, 0xfc, 0xc1, 0x10, 0x3b, 0x04, 0x6d, 0x43, 0x12, 0xb7, + 0xba, 0x66, 0x59, 0xda, 0x90, 0x6e, 0xe4, 0xb7, 0xaf, 0x6f, 0x8e, 0x2d, 0x70, 0x53, 0xf0, 0xd5, + 0x5b, 0x5d, 0xb3, 0x11, 0x53, 0x19, 0x2f, 0xba, 0x0d, 0xa9, 0x4e, 0x6f, 0xe8, 0x74, 0xcb, 0x71, + 0x26, 0xf4, 0x44, 0x94, 0xd0, 0x3d, 0xca, 0xd4, 0x88, 0xa9, 0x9c, 0x9b, 0x4e, 0x65, 0x0c, 
0x3a, + 0x66, 0x39, 0x71, 0xf9, 0x54, 0xbb, 0x83, 0x0e, 0x9b, 0x8a, 0xf2, 0xa2, 0x1d, 0x00, 0x63, 0x60, + 0x10, 0xad, 0xd5, 0xd5, 0x8d, 0x41, 0x39, 0xc5, 0x24, 0x9f, 0x8c, 0x96, 0x34, 0x48, 0x8d, 0x32, + 0x36, 0x62, 0x6a, 0xce, 0x70, 0x07, 0xf4, 0x73, 0x3f, 0x18, 0x62, 0xfb, 0xa2, 0x9c, 0xbe, 0xfc, + 0x73, 0xdf, 0xa2, 0x4c, 0xf4, 0x73, 0x19, 0x37, 0x7a, 0x1d, 0xb2, 0xad, 0x2e, 0x6e, 0x3d, 0xd4, + 0xc8, 0xa8, 0x9c, 0x65, 0x92, 0xeb, 0x51, 0x92, 0x35, 0xca, 0xd7, 0x1c, 0x35, 0x62, 0x6a, 0xa6, + 0xc5, 0x1f, 0xd1, 0xab, 0x90, 0x6e, 0x99, 0xfd, 0xbe, 0x41, 0xca, 0x79, 0x26, 0xbb, 0x16, 0x29, + 0xcb, 0xb8, 0x1a, 0x31, 0x55, 0xf0, 0xa3, 0x03, 0x28, 0xf5, 0x0c, 0x87, 0x68, 0xce, 0x40, 0xb7, + 0x9c, 0xae, 0x49, 0x9c, 0x72, 0x81, 0x69, 0x78, 0x3a, 0x4a, 0xc3, 0xbe, 0xe1, 0x90, 0x63, 0x97, + 0xb9, 0x11, 0x53, 0x8b, 0xbd, 0x20, 0x81, 0xea, 0x33, 0x3b, 0x1d, 0x6c, 0x7b, 0x0a, 0xcb, 0xc5, + 0xcb, 0xf5, 0x1d, 0x52, 0x6e, 0x57, 0x9e, 0xea, 0x33, 0x83, 0x04, 0xf4, 0x43, 0x58, 0xe9, 0x99, + 0x7a, 0xdb, 0x53, 0xa7, 0xb5, 0xba, 0xc3, 0xc1, 0xc3, 0x72, 0x89, 0x29, 0x7d, 0x2e, 0xf2, 0x23, + 0x4d, 0xbd, 0xed, 0xaa, 0xa8, 0x51, 0x81, 0x46, 0x4c, 0x5d, 0xee, 0x8d, 0x13, 0xd1, 0xbb, 0xb0, + 0xaa, 0x5b, 0x56, 0xef, 0x62, 0x5c, 0xfb, 0x12, 0xd3, 0x7e, 0x33, 0x4a, 0x7b, 0x95, 0xca, 0x8c, + 0xab, 0x47, 0xfa, 0x04, 0x15, 0x35, 0x41, 0xb6, 0x6c, 0x6c, 0xe9, 0x36, 0xd6, 0x2c, 0xdb, 0xb4, + 0x4c, 0x47, 0xef, 0x95, 0x65, 0xa6, 0xfb, 0xd9, 0x28, 0xdd, 0x47, 0x9c, 0xff, 0x48, 0xb0, 0x37, + 0x62, 0xea, 0x92, 0x15, 0x26, 0x71, 0xad, 0x66, 0x0b, 0x3b, 0x8e, 0xaf, 0x75, 0x79, 0x96, 0x56, + 0xc6, 0x1f, 0xd6, 0x1a, 0x22, 0xa1, 0x3a, 0xe4, 0xf1, 0x88, 0x8a, 0x6b, 0xe7, 0x26, 0xc1, 0x65, + 0xc4, 0x14, 0x2a, 0x91, 0x27, 0x94, 0xb1, 0x3e, 0x30, 0x09, 0x6e, 0xc4, 0x54, 0xc0, 0xde, 0x08, + 0xe9, 0x70, 0xe5, 0x1c, 0xdb, 0x46, 0xe7, 0x82, 0xa9, 0xd1, 0xd8, 0x1b, 0xc7, 0x30, 0x07, 0xe5, + 0x15, 0xa6, 0xf0, 0xf9, 0x28, 0x85, 0x0f, 0x98, 0x10, 0x55, 0x51, 0x77, 0x45, 0x1a, 0x31, 0x75, + 0xe5, 0x7c, 0x92, 0x4c, 0x5d, 0xac, 0x63, 0x0c, 0xf4, 0x9e, 0xf1, 0x11, 0xd6, 0x4e, 0x7b, 0x66, + 0xeb, 0x61, 0x79, 0xf5, 0x72, 0x17, 0xbb, 0x27, 0xb8, 0x77, 0x28, 0x33, 0x75, 0xb1, 0x4e, 0x90, + 0xb0, 0x93, 0x81, 0xd4, 0xb9, 0xde, 0x1b, 0xe2, 0xbd, 0x64, 0x36, 0x29, 0xa7, 0xf6, 0x92, 0xd9, + 0x8c, 0x9c, 0xdd, 0x4b, 0x66, 0x73, 0x32, 0xec, 0x25, 0xb3, 0x20, 0xe7, 0x95, 0x67, 0x21, 0x1f, + 0x08, 0x4c, 0xa8, 0x0c, 0x99, 0x3e, 0x76, 0x1c, 0xfd, 0x0c, 0xb3, 0x38, 0x96, 0x53, 0xdd, 0xa1, + 0x52, 0x82, 0x42, 0x30, 0x18, 0x29, 0x9f, 0x4a, 0x9e, 0x24, 0x8d, 0x33, 0x54, 0xf2, 0x1c, 0xdb, + 0xcc, 0x1c, 0x42, 0x52, 0x0c, 0xd1, 0x53, 0x50, 0x64, 0x4b, 0xd1, 0xdc, 0xf7, 0x34, 0xd8, 0x25, + 0xd5, 0x02, 0x23, 0x3e, 0x10, 0x4c, 0xeb, 0x90, 0xb7, 0xb6, 0x2d, 0x8f, 0x25, 0xc1, 0x58, 0xc0, + 0xda, 0xb6, 0x5c, 0x86, 0x27, 0xa1, 0x40, 0xd7, 0xed, 0x71, 0x24, 0xd9, 0x24, 0x79, 0x4a, 0x13, + 0x2c, 0xca, 0x9f, 0xe3, 0x20, 0x8f, 0x07, 0x30, 0xf4, 0x2a, 0x24, 0x69, 0x2c, 0x17, 0x61, 0xb9, + 0xb2, 0xc9, 0x03, 0xfd, 0xa6, 0x1b, 0xe8, 0x37, 0x9b, 0x6e, 0xa0, 0xdf, 0xc9, 0x7e, 0xf1, 0xd5, + 0x7a, 0xec, 0xd3, 0xbf, 0xae, 0x4b, 0x2a, 0x93, 0x40, 0xd7, 0x68, 0xd8, 0xd2, 0x8d, 0x81, 0x66, + 0xb4, 0xd9, 0x27, 0xe7, 0x68, 0x4c, 0xd2, 0x8d, 0xc1, 0x6e, 0x1b, 0xed, 0x83, 0xdc, 0x32, 0x07, + 0x0e, 0x1e, 0x38, 0x43, 0x47, 0xe3, 0xa9, 0x46, 0x04, 0xe3, 0x50, 0x48, 0xe5, 0x09, 0xaf, 0xe6, + 0x72, 0x1e, 0x31, 0x46, 0x75, 0xa9, 0x15, 0x26, 0xa0, 0x7b, 0x00, 0x5e, 0x3e, 0x72, 0xca, 0xc9, + 0x8d, 0xc4, 0x8d, 0xfc, 0xf6, 0xc6, 0xc4, 0x86, 0x3f, 0x70, 0x59, 0x4e, 0xac, 0xb6, 0x4e, 0xf0, + 0x4e, 0x92, 0x7e, 
0xae, 0x1a, 0x90, 0x44, 0xcf, 0xc0, 0x92, 0x6e, 0x59, 0x9a, 0x43, 0x74, 0x82, + 0xb5, 0xd3, 0x0b, 0x82, 0x1d, 0x16, 0xe7, 0x0b, 0x6a, 0x51, 0xb7, 0xac, 0x63, 0x4a, 0xdd, 0xa1, + 0x44, 0xf4, 0x34, 0x94, 0x68, 0x4c, 0x37, 0xf4, 0x9e, 0xd6, 0xc5, 0xc6, 0x59, 0x97, 0xb0, 0x78, + 0x9e, 0x50, 0x8b, 0x82, 0xda, 0x60, 0x44, 0xa5, 0xed, 0xed, 0x38, 0x8b, 0xe7, 0x08, 0x41, 0xb2, + 0xad, 0x13, 0x9d, 0x59, 0xb2, 0xa0, 0xb2, 0x67, 0x4a, 0xb3, 0x74, 0xd2, 0x15, 0xf6, 0x61, 0xcf, + 0xe8, 0x2a, 0xa4, 0x85, 0xda, 0x04, 0x53, 0x2b, 0x46, 0x68, 0x15, 0x52, 0x96, 0x6d, 0x9e, 0x63, + 0xb6, 0x75, 0x59, 0x95, 0x0f, 0x14, 0x15, 0x4a, 0xe1, 0xd8, 0x8f, 0x4a, 0x10, 0x27, 0x23, 0x31, + 0x4b, 0x9c, 0x8c, 0xd0, 0x4b, 0x90, 0xa4, 0x86, 0x64, 0x73, 0x94, 0xa6, 0x64, 0x3b, 0x21, 0xd7, + 0xbc, 0xb0, 0xb0, 0xca, 0x38, 0x95, 0x25, 0x28, 0x86, 0x72, 0x82, 0x72, 0x15, 0x56, 0xa7, 0x85, + 0x78, 0xa5, 0xeb, 0xd1, 0x43, 0xa1, 0x1a, 0xdd, 0x86, 0xac, 0x17, 0xe3, 0xb9, 0xe3, 0x5c, 0x9b, + 0x98, 0xd6, 0x65, 0x56, 0x3d, 0x56, 0xea, 0x31, 0x74, 0x03, 0xba, 0xba, 0xc8, 0xe8, 0x05, 0x35, + 0xa3, 0x5b, 0x56, 0x43, 0x77, 0xba, 0xca, 0x7b, 0x50, 0x8e, 0x8a, 0xdf, 0x01, 0x83, 0x49, 0xcc, + 0xed, 0x5d, 0x83, 0x5d, 0x85, 0x74, 0xc7, 0xb4, 0xfb, 0x3a, 0x61, 0xca, 0x8a, 0xaa, 0x18, 0x51, + 0x43, 0xf2, 0x58, 0x9e, 0x60, 0x64, 0x3e, 0x50, 0x34, 0xb8, 0x16, 0x19, 0xc3, 0xa9, 0x88, 0x31, + 0x68, 0x63, 0x6e, 0xd6, 0xa2, 0xca, 0x07, 0xbe, 0x22, 0xfe, 0xb1, 0x7c, 0x40, 0xa7, 0x75, 0xd8, + 0x5a, 0x99, 0xfe, 0x9c, 0x2a, 0x46, 0xca, 0x67, 0x09, 0xb8, 0x3a, 0x3d, 0x92, 0xa3, 0x0d, 0x28, + 0xf4, 0xf5, 0x91, 0x46, 0x46, 0xc2, 0xed, 0x24, 0xb6, 0xf1, 0xd0, 0xd7, 0x47, 0xcd, 0x11, 0xf7, + 0x39, 0x19, 0x12, 0x64, 0xe4, 0x94, 0xe3, 0x1b, 0x89, 0x1b, 0x05, 0x95, 0x3e, 0xa2, 0x13, 0x58, + 0xee, 0x99, 0x2d, 0xbd, 0xa7, 0xf5, 0x74, 0x87, 0x68, 0x22, 0xc5, 0xf3, 0x43, 0xf4, 0xd4, 0x84, + 0xb1, 0x79, 0x4c, 0xc6, 0x6d, 0xbe, 0x9f, 0x34, 0xe0, 0x08, 0xff, 0x5f, 0x62, 0x3a, 0xf6, 0x75, + 0x77, 0xab, 0xd1, 0x5d, 0xc8, 0xf7, 0x0d, 0xe7, 0x14, 0x77, 0xf5, 0x73, 0xc3, 0xb4, 0xc5, 0x69, + 0x9a, 0x74, 0x9a, 0x37, 0x7d, 0x1e, 0xa1, 0x29, 0x28, 0x16, 0xd8, 0x92, 0x54, 0xc8, 0x87, 0xdd, + 0x68, 0x92, 0x5e, 0x38, 0x9a, 0xbc, 0x04, 0xab, 0x03, 0x3c, 0x22, 0x9a, 0x7f, 0x5e, 0xb9, 0x9f, + 0x64, 0x98, 0xe9, 0x11, 0x7d, 0xe7, 0x9d, 0x70, 0x87, 0xba, 0x0c, 0x7a, 0x8e, 0xe5, 0x42, 0xcb, + 0x74, 0xb0, 0xad, 0xe9, 0xed, 0xb6, 0x8d, 0x1d, 0x87, 0x95, 0x4f, 0x05, 0x96, 0xe0, 0x18, 0xbd, + 0xca, 0xc9, 0xca, 0x2f, 0x82, 0x5b, 0x13, 0xce, 0x7d, 0xc2, 0xf0, 0x92, 0x6f, 0xf8, 0x63, 0x58, + 0x15, 0xf2, 0xed, 0x90, 0xed, 0x79, 0x0d, 0xfa, 0xf8, 0xe4, 0xf9, 0x1a, 0xb7, 0x39, 0x72, 0xc5, + 0xa3, 0xcd, 0x9e, 0x78, 0x34, 0xb3, 0x23, 0x48, 0x32, 0xa3, 0x24, 0x79, 0x88, 0xa1, 0xcf, 0xff, + 0x6e, 0x5b, 0xf1, 0x71, 0x02, 0x96, 0x27, 0x0a, 0x09, 0x6f, 0x61, 0xd2, 0xd4, 0x85, 0xc5, 0xa7, + 0x2e, 0x2c, 0xb1, 0xf0, 0xc2, 0xc4, 0x5e, 0x27, 0x67, 0xef, 0x75, 0xea, 0x7b, 0xdc, 0xeb, 0xf4, + 0xa3, 0xed, 0xf5, 0xbf, 0x74, 0x17, 0x7e, 0x2d, 0x41, 0x25, 0xba, 0xfa, 0x9a, 0xba, 0x1d, 0xcf, + 0xc3, 0xb2, 0xf7, 0x29, 0x9e, 0x7a, 0x1e, 0x18, 0x65, 0xef, 0x85, 0xd0, 0x1f, 0x99, 0xe3, 0x9e, + 0x86, 0xd2, 0x58, 0x6d, 0xc8, 0x5d, 0xb9, 0x78, 0x1e, 0x9c, 0x5f, 0xf9, 0x59, 0xc2, 0x4b, 0x3c, + 0xa1, 0x02, 0x6e, 0xca, 0x69, 0x7d, 0x0b, 0x56, 0xda, 0xb8, 0x65, 0xb4, 0x1f, 0xf5, 0xb0, 0x2e, + 0x0b, 0xe9, 0xff, 0x9e, 0xd5, 0x49, 0x2f, 0xf9, 0x15, 0x40, 0x56, 0xc5, 0x8e, 0x45, 0xeb, 0x31, + 0xb4, 0x03, 0x39, 0x3c, 0x6a, 0x61, 0x8b, 0xb8, 0x25, 0xec, 0xf4, 0x16, 0x81, 0x73, 0xd7, 0x5d, + 0x4e, 0xda, 0x20, 0x7b, 0x62, 0xe8, 0x96, 
0xc0, 0x00, 0xa2, 0xdb, 0x79, 0x21, 0x1e, 0x04, 0x01, + 0x5e, 0x76, 0x41, 0x80, 0x44, 0x64, 0x7f, 0xcb, 0xa5, 0xc6, 0x50, 0x80, 0x5b, 0x02, 0x05, 0x48, + 0xce, 0x98, 0x2c, 0x04, 0x03, 0xd4, 0x42, 0x30, 0x40, 0x7a, 0xc6, 0x32, 0x23, 0x70, 0x80, 0x97, + 0x5d, 0x1c, 0x20, 0x33, 0xe3, 0x8b, 0xc7, 0x80, 0x80, 0x37, 0x02, 0x40, 0x40, 0x8e, 0x89, 0x6e, + 0x44, 0x8a, 0x4e, 0x41, 0x02, 0x5e, 0xf3, 0x90, 0x80, 0x42, 0x24, 0x8a, 0x20, 0x84, 0xc7, 0xa1, + 0x80, 0xc3, 0x09, 0x28, 0x80, 0xb7, 0xee, 0xcf, 0x44, 0xaa, 0x98, 0x81, 0x05, 0x1c, 0x4e, 0x60, + 0x01, 0xa5, 0x19, 0x0a, 0x67, 0x80, 0x01, 0x3f, 0x9a, 0x0e, 0x06, 0x44, 0xb7, 0xeb, 0xe2, 0x33, + 0xe7, 0x43, 0x03, 0xb4, 0x08, 0x34, 0x40, 0x8e, 0xec, 0x5c, 0xb9, 0xfa, 0xb9, 0xe1, 0x80, 0x93, + 0x29, 0x70, 0x00, 0x6f, 0xdc, 0x6f, 0x44, 0x2a, 0x9f, 0x03, 0x0f, 0x38, 0x99, 0x82, 0x07, 0xa0, + 0x99, 0x6a, 0x67, 0x02, 0x02, 0xf7, 0xc2, 0x80, 0xc0, 0x4a, 0x44, 0xd5, 0xe9, 0x9f, 0xf6, 0x08, + 0x44, 0xe0, 0x34, 0x0a, 0x11, 0xe0, 0x5d, 0xfb, 0x0b, 0x91, 0x1a, 0x17, 0x80, 0x04, 0x0e, 0x27, + 0x20, 0x81, 0x2b, 0x33, 0x3c, 0x6d, 0x7e, 0x4c, 0x20, 0x25, 0xa7, 0xf7, 0x92, 0xd9, 0xac, 0x9c, + 0xe3, 0x68, 0xc0, 0x5e, 0x32, 0x9b, 0x97, 0x0b, 0xca, 0x73, 0xb4, 0x82, 0x19, 0x8b, 0x73, 0xb4, + 0x57, 0xc0, 0xb6, 0x6d, 0xda, 0xa2, 0xbb, 0xe7, 0x03, 0xe5, 0x06, 0xed, 0x11, 0xfd, 0x98, 0x76, + 0x09, 0x7e, 0xc0, 0x7a, 0xb2, 0x40, 0x1c, 0x53, 0x7e, 0x2f, 0xf9, 0xb2, 0x0c, 0x41, 0x08, 0xf6, + 0x97, 0x39, 0xd1, 0x5f, 0x06, 0x50, 0x85, 0x78, 0x18, 0x55, 0x58, 0x87, 0x3c, 0xed, 0xb5, 0xc6, + 0x00, 0x03, 0xdd, 0xf2, 0x00, 0x83, 0x9b, 0xb0, 0xcc, 0x12, 0x26, 0xc7, 0x1e, 0x44, 0x5a, 0x4a, + 0xb2, 0xb4, 0xb4, 0x44, 0x5f, 0x70, 0xeb, 0xf0, 0xfc, 0xf4, 0x22, 0xac, 0x04, 0x78, 0xbd, 0x1e, + 0x8e, 0x77, 0xcf, 0xb2, 0xc7, 0x5d, 0x15, 0xcd, 0xdc, 0x9f, 0x24, 0xdf, 0x42, 0x3e, 0xd2, 0x30, + 0x0d, 0x14, 0x90, 0xbe, 0x27, 0x50, 0x20, 0xfe, 0xc8, 0xa0, 0x40, 0xb0, 0x27, 0x4d, 0x84, 0x7b, + 0xd2, 0x7f, 0x48, 0xfe, 0x9e, 0x78, 0x2d, 0x7e, 0xcb, 0x6c, 0x63, 0xd1, 0x25, 0xb2, 0x67, 0x5a, + 0x92, 0xf4, 0xcc, 0x33, 0xd1, 0x0b, 0xd2, 0x47, 0xca, 0xe5, 0x25, 0x9e, 0x9c, 0xc8, 0x2b, 0x5e, + 0x83, 0xc9, 0x13, 0xbf, 0x68, 0x30, 0x65, 0x48, 0x3c, 0xc4, 0x1c, 0x2e, 0x2e, 0xa8, 0xf4, 0x91, + 0xf2, 0x31, 0xe7, 0x13, 0x09, 0x9c, 0x0f, 0xd0, 0xab, 0x90, 0x63, 0x60, 0xbf, 0x66, 0x5a, 0x8e, + 0x80, 0x88, 0x43, 0xa5, 0x0d, 0x47, 0xfc, 0x37, 0x8f, 0x28, 0xcf, 0xa1, 0xe5, 0xa8, 0x59, 0x4b, + 0x3c, 0x05, 0x2a, 0x8e, 0x5c, 0xa8, 0xe2, 0xb8, 0x0e, 0x39, 0xfa, 0xf5, 0x8e, 0xa5, 0xb7, 0x70, + 0x19, 0xd8, 0x87, 0xfa, 0x04, 0xe5, 0x77, 0x71, 0x58, 0x1a, 0x4b, 0x34, 0x53, 0xd7, 0xee, 0xba, + 0x64, 0x3c, 0x00, 0x79, 0xcc, 0x67, 0x8f, 0x35, 0x80, 0x33, 0xdd, 0xd1, 0x3e, 0xd4, 0x07, 0x04, + 0xb7, 0x85, 0x51, 0x02, 0x14, 0x54, 0x81, 0x2c, 0x1d, 0x0d, 0x1d, 0xdc, 0x16, 0xe8, 0x8b, 0x37, + 0x46, 0x0d, 0x48, 0xe3, 0x73, 0x3c, 0x20, 0x4e, 0x39, 0xc3, 0xb6, 0xfd, 0xea, 0x64, 0x3b, 0x4c, + 0x5f, 0xef, 0x94, 0xe9, 0x66, 0x7f, 0xfb, 0xd5, 0xba, 0xcc, 0xb9, 0x5f, 0x30, 0xfb, 0x06, 0xc1, + 0x7d, 0x8b, 0x5c, 0xa8, 0x42, 0x3e, 0x6c, 0x85, 0xec, 0x98, 0x15, 0x18, 0x0e, 0x58, 0x70, 0xdb, + 0x7b, 0x6a, 0x53, 0xc3, 0xb4, 0x0d, 0x72, 0xa1, 0x16, 0xfb, 0xb8, 0x6f, 0x99, 0x66, 0x4f, 0xe3, + 0x67, 0xbc, 0x0a, 0xa5, 0x70, 0x5e, 0x45, 0x4f, 0x41, 0xd1, 0xc6, 0x44, 0x37, 0x06, 0x5a, 0xa8, + 0x08, 0x2e, 0x70, 0x22, 0x3f, 0x53, 0x7b, 0xc9, 0xac, 0x24, 0xc7, 0xf7, 0x92, 0xd9, 0xb8, 0x9c, + 0x50, 0x8e, 0xe0, 0xca, 0xd4, 0xbc, 0x8a, 0x5e, 0x81, 0x9c, 0x9f, 0x92, 0x25, 0xb6, 0xda, 0x4b, + 0x90, 0x16, 0x9f, 0x57, 0xf9, 0x83, 0xe4, 0xab, 0x0c, 0x63, 0x37, 
0x75, 0x48, 0xdb, 0xd8, 0x19, + 0xf6, 0x38, 0x9a, 0x52, 0xda, 0x7e, 0x71, 0xbe, 0x8c, 0x4c, 0xa9, 0xc3, 0x1e, 0x51, 0x85, 0xb0, + 0xf2, 0x2e, 0xa4, 0x39, 0x05, 0xe5, 0x21, 0x73, 0x72, 0x70, 0xff, 0xe0, 0xf0, 0xed, 0x03, 0x39, + 0x86, 0x00, 0xd2, 0xd5, 0x5a, 0xad, 0x7e, 0xd4, 0x94, 0x25, 0x94, 0x83, 0x54, 0x75, 0xe7, 0x50, + 0x6d, 0xca, 0x71, 0x4a, 0x56, 0xeb, 0x7b, 0xf5, 0x5a, 0x53, 0x4e, 0xa0, 0x65, 0x28, 0xf2, 0x67, + 0xed, 0xde, 0xa1, 0xfa, 0x66, 0xb5, 0x29, 0x27, 0x03, 0xa4, 0xe3, 0xfa, 0xc1, 0xdd, 0xba, 0x2a, + 0xa7, 0x94, 0xff, 0x81, 0x6b, 0x91, 0x39, 0xdc, 0x07, 0x66, 0xa4, 0x00, 0x30, 0xa3, 0x7c, 0x16, + 0xa7, 0x4d, 0x4d, 0x54, 0x62, 0x46, 0x7b, 0x63, 0x0b, 0xdf, 0x5e, 0x20, 0xab, 0x8f, 0xad, 0x9e, + 0xf6, 0x31, 0x36, 0xee, 0x60, 0xd2, 0xea, 0xf2, 0x42, 0x81, 0x47, 0xa0, 0xa2, 0x5a, 0x14, 0x54, + 0x26, 0xe4, 0x70, 0xb6, 0xf7, 0x71, 0x8b, 0x68, 0xdc, 0x89, 0x1c, 0xd6, 0x4c, 0xe4, 0x28, 0x1b, + 0xa5, 0x1e, 0x73, 0xa2, 0xf2, 0xde, 0x42, 0xb6, 0xcc, 0x41, 0x4a, 0xad, 0x37, 0xd5, 0x77, 0xe4, + 0x04, 0x42, 0x50, 0x62, 0x8f, 0xda, 0xf1, 0x41, 0xf5, 0xe8, 0xb8, 0x71, 0x48, 0x6d, 0xb9, 0x02, + 0x4b, 0xae, 0x2d, 0x5d, 0x62, 0x4a, 0x79, 0x1e, 0x1e, 0x8b, 0xa8, 0x2a, 0x26, 0x5b, 0x2a, 0xe5, + 0x37, 0x52, 0x90, 0x3b, 0x5c, 0x19, 0x1c, 0x42, 0xda, 0x21, 0x3a, 0x19, 0x3a, 0xc2, 0x88, 0xaf, + 0xcc, 0x5b, 0x66, 0x6c, 0xba, 0x0f, 0xc7, 0x4c, 0x5c, 0x15, 0x6a, 0x94, 0xdb, 0x50, 0x0a, 0xbf, + 0x89, 0xb6, 0x81, 0xef, 0x44, 0x71, 0xe5, 0x0e, 0xa0, 0xc9, 0xea, 0x63, 0x4a, 0x7b, 0x29, 0x4d, + 0x6b, 0x2f, 0x7f, 0x2b, 0xc1, 0xe3, 0x97, 0x54, 0x1a, 0xe8, 0xad, 0xb1, 0x45, 0xbe, 0xb6, 0x48, + 0x9d, 0xb2, 0xc9, 0x69, 0x63, 0xcb, 0xbc, 0x05, 0x85, 0x20, 0x7d, 0xbe, 0x45, 0x7e, 0x1b, 0xf7, + 0x0f, 0x71, 0xb8, 0x0f, 0xf6, 0x43, 0xa0, 0xf4, 0x1d, 0x43, 0xe0, 0xeb, 0x00, 0x64, 0xa4, 0x71, + 0xb7, 0x76, 0xf3, 0xe8, 0x13, 0x53, 0xf0, 0x45, 0xdc, 0x6a, 0x8e, 0xc4, 0x21, 0xc8, 0x11, 0xf1, + 0xe4, 0xa0, 0xe3, 0x20, 0x28, 0x30, 0x64, 0x39, 0xd6, 0x11, 0x0d, 0xf3, 0xbc, 0xc9, 0xd8, 0x07, + 0x0f, 0x38, 0xd9, 0x41, 0xef, 0xc0, 0x63, 0x63, 0x85, 0x82, 0xa7, 0x3a, 0x39, 0x6f, 0xbd, 0x70, + 0x25, 0x5c, 0x2f, 0xb8, 0xaa, 0x83, 0xd9, 0x3e, 0x15, 0xce, 0xf6, 0xef, 0x00, 0xf8, 0xe0, 0x00, + 0x8d, 0x30, 0xb6, 0x39, 0x1c, 0xb4, 0x99, 0x07, 0xa4, 0x54, 0x3e, 0x40, 0xb7, 0x21, 0x45, 0x3d, + 0xc9, 0xb5, 0xd3, 0x64, 0x28, 0xa6, 0x9e, 0x10, 0x00, 0x17, 0x38, 0xb7, 0x62, 0x00, 0x9a, 0x04, + 0x68, 0x23, 0xa6, 0x78, 0x23, 0x3c, 0xc5, 0x93, 0x91, 0x50, 0xef, 0xf4, 0xa9, 0x3e, 0x82, 0x14, + 0xdb, 0x79, 0x9a, 0x74, 0xd9, 0x5f, 0x01, 0x51, 0x2d, 0xd2, 0x67, 0xf4, 0x63, 0x00, 0x9d, 0x10, + 0xdb, 0x38, 0x1d, 0xfa, 0x13, 0xac, 0x4f, 0xf7, 0x9c, 0xaa, 0xcb, 0xb7, 0x73, 0x5d, 0xb8, 0xd0, + 0xaa, 0x2f, 0x1a, 0x70, 0xa3, 0x80, 0x42, 0xe5, 0x00, 0x4a, 0x61, 0x59, 0xb7, 0xbe, 0xe1, 0xdf, + 0x10, 0xae, 0x6f, 0x78, 0xb9, 0x2a, 0xea, 0x1b, 0xaf, 0x3a, 0x4a, 0xf0, 0x5f, 0x1f, 0x6c, 0xa0, + 0xfc, 0x24, 0x0e, 0x85, 0xa0, 0xe3, 0xfd, 0xe7, 0x95, 0x20, 0xca, 0xcf, 0x25, 0xc8, 0x7a, 0xcb, + 0x0f, 0xff, 0x07, 0x09, 0xfd, 0x38, 0xe2, 0xd6, 0x8b, 0x07, 0x7f, 0x5e, 0xf0, 0xdf, 0x44, 0x09, + 0xef, 0x37, 0xd1, 0x1d, 0x2f, 0xfd, 0x45, 0x01, 0x22, 0x41, 0x5b, 0x0b, 0xaf, 0x72, 0xb3, 0xfd, + 0x1d, 0xc8, 0x79, 0xa7, 0x97, 0x36, 0x1d, 0x2e, 0x70, 0x24, 0x89, 0x33, 0x24, 0x60, 0xbf, 0x55, + 0x48, 0x59, 0xe6, 0x87, 0xe2, 0xcf, 0x48, 0x42, 0xe5, 0x03, 0xa5, 0x0d, 0x4b, 0x63, 0x47, 0x1f, + 0xdd, 0x81, 0x8c, 0x35, 0x3c, 0xd5, 0x5c, 0xe7, 0x18, 0x83, 0xd7, 0xdc, 0x72, 0x76, 0x78, 0xda, + 0x33, 0x5a, 0xf7, 0xf1, 0x85, 0xfb, 0x31, 0xd6, 0xf0, 0xf4, 0x3e, 0xf7, 0x21, 0x3e, 0x4b, 
0x3c, + 0x38, 0xcb, 0x2f, 0x25, 0xc8, 0xba, 0x67, 0x02, 0xfd, 0x1f, 0xe4, 0xbc, 0xb0, 0xe2, 0xfd, 0xda, + 0x8c, 0x8c, 0x47, 0x42, 0xbf, 0x2f, 0x82, 0xaa, 0xee, 0x3f, 0x59, 0xa3, 0xad, 0x75, 0x7a, 0x3a, + 0xf7, 0xa5, 0x52, 0xd8, 0x66, 0x3c, 0xf0, 0xb0, 0x78, 0xbc, 0x7b, 0xf7, 0x5e, 0x4f, 0x3f, 0x53, + 0xf3, 0x4c, 0x66, 0xb7, 0x4d, 0x07, 0xa2, 0xb2, 0xfb, 0xbb, 0x04, 0xf2, 0xf8, 0x89, 0xfd, 0xce, + 0x5f, 0x37, 0x99, 0xe6, 0x12, 0x53, 0xd2, 0x1c, 0xda, 0x82, 0x15, 0x8f, 0x43, 0x73, 0x8c, 0xb3, + 0x81, 0x4e, 0x86, 0x36, 0x16, 0x80, 0x24, 0xf2, 0x5e, 0x1d, 0xbb, 0x6f, 0x26, 0x57, 0x9d, 0x7a, + 0xc4, 0x55, 0x7f, 0x1c, 0x87, 0x7c, 0x00, 0x1e, 0x45, 0xff, 0x1b, 0x08, 0x46, 0xa5, 0x29, 0x99, + 0x21, 0xc0, 0xeb, 0xff, 0xa6, 0x0c, 0x9b, 0x29, 0xbe, 0xb8, 0x99, 0xa2, 0x40, 0x68, 0x17, 0x6d, + 0x4d, 0x2e, 0x8c, 0xb6, 0xbe, 0x00, 0x88, 0x98, 0x44, 0xef, 0x69, 0xe7, 0x26, 0x31, 0x06, 0x67, + 0x1a, 0x77, 0x43, 0x1e, 0x3a, 0x64, 0xf6, 0xe6, 0x01, 0x7b, 0x71, 0xc4, 0x3c, 0xf2, 0xa7, 0x12, + 0x64, 0xbd, 0xb2, 0x7b, 0xd1, 0x9f, 0x98, 0x57, 0x21, 0x2d, 0x2a, 0x4b, 0xfe, 0x17, 0x53, 0x8c, + 0xa6, 0xc2, 0xca, 0x15, 0xc8, 0xf6, 0x31, 0xd1, 0x59, 0x1c, 0xe4, 0x59, 0xcd, 0x1b, 0xdf, 0x7c, + 0x0d, 0xf2, 0x81, 0x1f, 0xc0, 0x34, 0x34, 0x1e, 0xd4, 0xdf, 0x96, 0x63, 0x95, 0xcc, 0x27, 0x9f, + 0x6f, 0x24, 0x0e, 0xf0, 0x87, 0xf4, 0x34, 0xab, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x2f, 0x4b, 0x95, + 0xfc, 0x27, 0x9f, 0x6f, 0x64, 0x54, 0xcc, 0x10, 0xc5, 0x9b, 0xf7, 0x61, 0x69, 0x6c, 0x63, 0xc2, + 0x65, 0x0b, 0x82, 0xd2, 0xdd, 0x93, 0xa3, 0xfd, 0xdd, 0x5a, 0xb5, 0x59, 0xd7, 0x1e, 0x1c, 0x36, + 0xeb, 0xb2, 0x84, 0x1e, 0x83, 0x95, 0xfd, 0xdd, 0xff, 0x6f, 0x34, 0xb5, 0xda, 0xfe, 0x6e, 0xfd, + 0xa0, 0xa9, 0x55, 0x9b, 0xcd, 0x6a, 0xed, 0xbe, 0x1c, 0xdf, 0xfe, 0x3c, 0x0f, 0xc9, 0xea, 0x4e, + 0x6d, 0x17, 0xd5, 0x20, 0xc9, 0xa0, 0x90, 0x4b, 0x6f, 0x80, 0x55, 0x2e, 0xc7, 0x86, 0xd1, 0x3d, + 0x48, 0x31, 0x94, 0x04, 0x5d, 0x7e, 0x25, 0xac, 0x32, 0x03, 0x2c, 0xa6, 0x1f, 0xc3, 0x4e, 0xe4, + 0xa5, 0x77, 0xc4, 0x2a, 0x97, 0x63, 0xc7, 0x68, 0x1f, 0x32, 0x6e, 0x93, 0x3c, 0xeb, 0xe2, 0x56, + 0x65, 0x26, 0xa0, 0x4b, 0x97, 0xc6, 0xc1, 0x86, 0xcb, 0xaf, 0x8f, 0x55, 0x66, 0xa0, 0xca, 0x68, + 0x17, 0xd2, 0xa2, 0x1d, 0x9d, 0x71, 0x23, 0xac, 0x32, 0x0b, 0x27, 0x46, 0x2a, 0xe4, 0x7c, 0x18, + 0x67, 0xf6, 0xa5, 0xb8, 0xca, 0x1c, 0x80, 0x39, 0x7a, 0x17, 0x8a, 0xe1, 0x56, 0x77, 0xbe, 0x5b, + 0x67, 0x95, 0x39, 0x11, 0x69, 0xaa, 0x3f, 0xdc, 0xf7, 0xce, 0x77, 0x0b, 0xad, 0x32, 0x27, 0x40, + 0x8d, 0xde, 0x87, 0xe5, 0xc9, 0xbe, 0x74, 0xfe, 0x4b, 0x69, 0x95, 0x05, 0x20, 0x6b, 0xd4, 0x07, + 0x34, 0xa5, 0x9f, 0x5d, 0xe0, 0x8e, 0x5a, 0x65, 0x11, 0x04, 0x1b, 0xb5, 0x61, 0x69, 0xbc, 0x49, + 0x9c, 0xf7, 0xce, 0x5a, 0x65, 0x6e, 0x34, 0x9b, 0xcf, 0x12, 0x6e, 0x2e, 0xe7, 0xbd, 0xc3, 0x56, + 0x99, 0x1b, 0xdc, 0x46, 0x27, 0x00, 0x81, 0xfe, 0x70, 0x8e, 0x3b, 0x6d, 0x95, 0x79, 0x60, 0x6e, + 0x64, 0xc1, 0xca, 0xb4, 0xc6, 0x71, 0x91, 0x2b, 0x6e, 0x95, 0x85, 0xd0, 0x6f, 0xea, 0xcf, 0xe1, + 0x16, 0x70, 0xbe, 0x2b, 0x6f, 0x95, 0x39, 0x61, 0xf0, 0x9d, 0xea, 0x17, 0x5f, 0xaf, 0x49, 0x5f, + 0x7e, 0xbd, 0x26, 0xfd, 0xed, 0xeb, 0x35, 0xe9, 0xd3, 0x6f, 0xd6, 0x62, 0x5f, 0x7e, 0xb3, 0x16, + 0xfb, 0xcb, 0x37, 0x6b, 0xb1, 0x1f, 0x3c, 0x7b, 0x66, 0x90, 0xee, 0xf0, 0x74, 0xb3, 0x65, 0xf6, + 0xb7, 0x5a, 0x66, 0x1f, 0x93, 0xd3, 0x0e, 0xf1, 0x1f, 0xfc, 0x9b, 0xcb, 0xa7, 0x69, 0x96, 0x41, + 0x6f, 0xfd, 0x33, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd0, 0x90, 0x6e, 0xd9, 0x2c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
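The block above is only the regenerated gzipped FileDescriptorProto blob that gogoproto embeds in the generated file; no hand-written code changes, only the compressed bytes differ. For readers who want to verify what such a blob encodes, it can be gunzipped and unmarshalled with the protobuf runtime. A minimal sketch, assuming only the standard library and google.golang.org/protobuf; the gz parameter stands in for the generated fileDescriptor variable:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// inspectDescriptor gunzips a compressed FileDescriptorProto blob and
// prints the names of the messages it declares.
func inspectDescriptor(gz []byte) error {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return err
	}
	defer r.Close()

	raw, err := io.ReadAll(r)
	if err != nil {
		return err
	}

	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return err
	}
	for _, m := range fd.GetMessageType() {
		fmt.Println(m.GetName())
	}
	return nil
}

func main() {
	var gz []byte // in practice: the fileDescriptor variable from the generated file
	if err := inspectDescriptor(gz); err != nil {
		log.Fatal(err)
	}
}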
@@ -4362,6 +4412,7 @@ func _ABCI_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +var ABCI_serviceDesc = _ABCI_serviceDesc var _ABCI_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.abci.ABCI", HandlerType: (*ABCIServer)(nil), @@ -5442,6 +5493,61 @@ func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + n25, err25 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err25 != nil { + return 0, err25 + } + i -= n25 + i = encodeVarintTypes(dAtA, i, uint64(n25)) + i-- + dAtA[i] = 0x1a if m.Height != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- @@ -5540,12 +5646,12 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n24, err24 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) - if err24 != nil { - return 0, err24 + n26, err26 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err26 != nil { + return 0, err26 } - i -= n24 - i = encodeVarintTypes(dAtA, i, uint64(n24)) + i -= n26 + i = encodeVarintTypes(dAtA, i, uint64(n26)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6508,20 +6614,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA46 := make([]byte, len(m.RefetchChunks)*10) - var j45 int + dAtA48 := make([]byte, len(m.RefetchChunks)*10) + var j47 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA46[j45] = uint8(uint64(num)&0x7f | 0x80) + dAtA48[j47] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j45++ + j47++ } - dAtA46[j45] = uint8(num) - j45++ + dAtA48[j47] = uint8(num) + j47++ } - i -= j45 - copy(dAtA[i:], dAtA46[:j45]) - i = encodeVarintTypes(dAtA, i, uint64(j45)) + i -= j47 + copy(dAtA[i:], dAtA48[:j47]) + i = encodeVarintTypes(dAtA, i, uint64(j47)) i-- dAtA[i] = 0x12 } @@ -7228,12 +7334,12 @@ func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n52, err52 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) - if err52 != nil { - return 0, err52 + n54, err54 := 
github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err54 != nil { + return 0, err54 } - i -= n52 - i = encodeVarintTypes(dAtA, i, uint64(n52)) + i -= n54 + i = encodeVarintTypes(dAtA, i, uint64(n54)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -7807,6 +7913,30 @@ func (m *RequestExtendVote) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ProposedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -11202,6 +11332,206 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
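All of the hand-rolled loops in this generated unmarshalling code follow the same protobuf base-128 varint scheme: each byte contributes its low seven bits, and a set high bit means another byte follows. A standalone sketch of that decoding step, a hypothetical helper rather than anything exported by the generated file:

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint decodes a protobuf base-128 varint from b, returning the
// value and the number of bytes consumed. It mirrors the inlined loops in
// the generated Unmarshal methods above.
func decodeVarint(b []byte) (uint64, int, error) {
	var v uint64
	for n, shift := 0, uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: this was the last byte
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows a 64-bit integer")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded as a varint
	fmt.Println(v, n, err)                        // 300 2 <nil>
}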
if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Misbehavior = append(m.Misbehavior, Misbehavior{})
+			if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...)
+			if m.NextValidatorsHash == nil {
+				m.NextValidatorsHash = []byte{}
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...)
+			if m.ProposerAddress == nil {
+				m.ProposerAddress = []byte{}
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipTypes(dAtA[iNdEx:])
diff --git a/blocksync/errors.go b/blocksync/errors.go
new file mode 100644
index 00000000000..04926db9945
--- /dev/null
+++ b/blocksync/errors.go
@@ -0,0 +1,53 @@
+package blocksync
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/cosmos/gogoproto/proto"
+)
+
+var (
+	// ErrNilMessage is returned when the provided message is nil.
+	ErrNilMessage = errors.New("message cannot be nil")
+)
+
+// ErrInvalidHeight is returned when a peer reports a status with an invalid height.
+type ErrInvalidHeight struct {
+	Height int64
+	Reason string
+}
+
+func (e ErrInvalidHeight) Error() string {
+	return fmt.Sprintf("invalid height %v: %s", e.Height, e.Reason)
+}
+
+// ErrInvalidBase is returned when a peer reports a status with an invalid base.
+type ErrInvalidBase struct {
+	Base   int64
+	Reason string
+}
+
+func (e ErrInvalidBase) Error() string {
+	return fmt.Sprintf("invalid base %v: %s", e.Base, e.Reason)
+}
+
+type ErrUnknownMessageType struct {
+	Msg proto.Message
+}
+
+func (e ErrUnknownMessageType) Error() string {
+	return fmt.Sprintf("unknown message type %T", e.Msg)
+}
+
+type ErrReactorValidation struct {
+	Err error
+}
+
+func (e ErrReactorValidation) Error() string {
+	return fmt.Sprintf("reactor validation error: %v", e.Err)
+}
+
+func (e ErrReactorValidation) Unwrap() error {
+	return e.Err
+}
diff --git a/blocksync/msgs.go b/blocksync/msgs.go
index 447748ecb92..d4f49ef6fb5 100644
--- a/blocksync/msgs.go
+++ b/blocksync/msgs.go
@@ -1,7 +1,6 @@
 package blocksync

 import (
-	"errors"
 	"fmt"

 	"github.com/cosmos/gogoproto/proto"
@@ -22,37 +21,36 @@ const (
 // ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error { if pb == nil { - return errors.New("message cannot be nil") + return ErrNilMessage } switch msg := pb.(type) { case *bcproto.BlockRequest: if msg.Height < 0 { - return errors.New("negative Height") + return ErrInvalidHeight{Height: msg.Height, Reason: "negative height"} } case *bcproto.BlockResponse: - _, err := types.BlockFromProto(msg.Block) - if err != nil { - return err - } + // Avoid double-calling `types.BlockFromProto` for performance reasons. + // See https://github.com/cometbft/cometbft/issues/1964 + return nil case *bcproto.NoBlockResponse: if msg.Height < 0 { - return errors.New("negative Height") + return ErrInvalidHeight{Height: msg.Height, Reason: "negative height"} } case *bcproto.StatusResponse: if msg.Base < 0 { - return errors.New("negative Base") + return ErrInvalidBase{Base: msg.Base, Reason: "negative base"} } if msg.Height < 0 { - return errors.New("negative Height") + return ErrInvalidHeight{Height: msg.Height, Reason: "negative height"} } if msg.Base > msg.Height { - return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height) + return ErrInvalidHeight{Height: msg.Height, Reason: fmt.Sprintf("base %v cannot be greater than height", msg.Base)} } case *bcproto.StatusRequest: return nil default: - return fmt.Errorf("unknown message type %T", msg) + return ErrUnknownMessageType{Msg: msg} } return nil } diff --git a/blocksync/pool.go b/blocksync/pool.go index 522c3e68178..49ddf4809ef 100644 --- a/blocksync/pool.go +++ b/blocksync/pool.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "sort" "sync/atomic" "time" @@ -13,6 +14,7 @@ import ( cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) /* @@ -29,8 +31,6 @@ eg, L = latency = 0.1s const ( requestIntervalMS = 2 - maxTotalRequesters = 600 - maxPendingRequests = maxTotalRequesters maxPendingRequestsPerPeer = 20 requestRetrySeconds = 30 @@ -38,12 +38,20 @@ const ( // enough. If a peer is not sending us data at at least that rate, we // consider them to have timedout and we disconnect. // - // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s, - // sending data across atlantic ~ 7.5 KB/s. - minRecvRate = 7680 - - // Maximum difference between current and new block's height. - maxDiffBetweenCurrentAndReceivedBlockHeight = 100 + // Based on the experiments with [Osmosis](https://osmosis.zone/), the + // minimum rate could be as high as 500 KB/s. However, we're setting it to + // 128 KB/s for now to be conservative. + minRecvRate = 128 * 1024 // 128 KB/s + + // peerConnWait is the time that must have elapsed since the pool routine + // was created before we start making requests. This is to give the peer + // routine time to connect to peers. + peerConnWait = 3 * time.Second + + // If we're within minBlocksForSingleRequest blocks of the pool's height, we + // send 2 parallel requests to 2 peers for the same block. If we're further + // away, we send a single request. + minBlocksForSingleRequest = 50 ) var peerTimeout = 15 * time.Second // not const so we can override with tests @@ -62,7 +70,8 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests // BlockPool keeps track of the block sync peers, block requests and block responses. 
type BlockPool struct { service.BaseService - startTime time.Time + startTime time.Time + startHeight int64 mtx cmtsync.Mutex // block requests @@ -70,7 +79,9 @@ type BlockPool struct { height int64 // the lowest key in requesters. // peers peers map[p2p.ID]*bpPeer - maxPeerHeight int64 // the biggest reported height + bannedPeers map[p2p.ID]time.Time + sortedPeers []*bpPeer // sorted by curRate, highest first + maxPeerHeight int64 // the biggest reported height // atomic numPending int32 // number of requests pending assignment or block response @@ -83,11 +94,12 @@ type BlockPool struct { // requests and errors will be sent to requestsCh and errorsCh accordingly. func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { bp := &BlockPool{ - peers: make(map[p2p.ID]*bpPeer), - - requesters: make(map[int64]*bpRequester), - height: start, - numPending: 0, + peers: make(map[p2p.ID]*bpPeer), + bannedPeers: make(map[p2p.ID]time.Time), + requesters: make(map[int64]*bpRequester), + height: start, + startHeight: start, + numPending: 0, requestsCh: requestsCh, errorsCh: errorsCh, @@ -99,8 +111,8 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p // OnStart implements service.Service by spawning requesters routine and recording // pool's start time. func (pool *BlockPool) OnStart() error { - go pool.makeRequestersRoutine() pool.startTime = time.Now() + go pool.makeRequestersRoutine() return nil } @@ -108,24 +120,37 @@ func (pool *BlockPool) OnStart() error { func (pool *BlockPool) makeRequestersRoutine() { for { if !pool.IsRunning() { - break + return } - _, numPending, lenRequesters := pool.GetStatus() + // Check if we are within peerConnWait seconds of start time + // This gives us some time to connect to peers before starting a wave of requests + if time.Since(pool.startTime) < peerConnWait { + // Calculate the duration to sleep until peerConnWait seconds have passed since pool.startTime + sleepDuration := peerConnWait - time.Since(pool.startTime) + time.Sleep(sleepDuration) + } + + pool.mtx.Lock() + var ( + maxRequestersCreated = len(pool.requesters) >= len(pool.peers)*maxPendingRequestsPerPeer + + nextHeight = pool.height + int64(len(pool.requesters)) + maxPeerHeightReached = nextHeight > pool.maxPeerHeight + ) + pool.mtx.Unlock() + switch { - case numPending >= maxPendingRequests: - // sleep for a bit. + case maxRequestersCreated: // If we have enough requesters, wait for them to finish. time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers pool.removeTimedoutPeers() - case lenRequesters >= maxTotalRequesters: - // sleep for a bit. + case maxPeerHeightReached: // If we're caught up, wait for a bit so reactor could finish or a higher height is reported. time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() default: // request for more blocks. - pool.makeNextRequester() + pool.makeNextRequester(nextHeight) + // Sleep for a bit to make the requests more ordered. 
+ time.Sleep(requestIntervalMS * time.Millisecond) } } } @@ -147,11 +172,22 @@ func (pool *BlockPool) removeTimedoutPeers() { "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) peer.didTimeout = true } + + peer.curRate = curRate } + if peer.didTimeout { pool.removePeer(peer.id) } } + + for peerID := range pool.bannedPeers { + if !pool.isPeerBanned(peerID) { + delete(pool.bannedPeers, peerID) + } + } + + pool.sortPeers() } // GetStatus returns pool's height, numPending requests and the number of @@ -207,45 +243,62 @@ func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtComm return } -// PopRequest pops the first block at pool.height. -// It must have been validated by the second Commit from PeekTwoBlocks. -// TODO(thane): (?) and its corresponding ExtendedCommit. +// PopRequest removes the requester at pool.height and increments pool.height. func (pool *BlockPool) PopRequest() { pool.mtx.Lock() defer pool.mtx.Unlock() - if r := pool.requesters[pool.height]; r != nil { - /* The block can disappear at any time, due to removePeer(). - if r := pool.requesters[pool.height]; r == nil || r.block == nil { - PanicSanity("PopRequest() requires a valid block") - } - */ - if err := r.Stop(); err != nil { - pool.Logger.Error("Error stopping requester", "err", err) - } - delete(pool.requesters, pool.height) - pool.height++ - } else { + r := pool.requesters[pool.height] + if r == nil { panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height)) } + + if err := r.Stop(); err != nil { + pool.Logger.Error("Error stopping requester", "err", err) + } + delete(pool.requesters, pool.height) + pool.height++ + + // Notify the next minBlocksForSingleRequest requesters about new height, so + // they can potentially request a block from the second peer. + for i := int64(0); i < minBlocksForSingleRequest && i < int64(len(pool.requesters)); i++ { + pool.requesters[pool.height+i].newHeight(pool.height) + } } -// RedoRequest invalidates the block at pool.height, -// Remove the peer and redo request from others. +// RemovePeerAndRedoAllPeerRequests retries the request at the given height and +// all the requests made to the same peer. The peer is removed from the pool. // Returns the ID of the removed peer. -func (pool *BlockPool) RedoRequest(height int64) p2p.ID { +func (pool *BlockPool) RemovePeerAndRedoAllPeerRequests(height int64) p2p.ID { pool.mtx.Lock() defer pool.mtx.Unlock() request := pool.requesters[height] - peerID := request.getPeerID() - if peerID != p2p.ID("") { - // RemovePeer will redo all requesters associated with this peer. - pool.removePeer(peerID) - } + peerID := request.gotBlockFromPeerID() + // RemovePeer will redo all requesters associated with this peer. + pool.removePeer(peerID) + pool.banPeer(peerID) return peerID } +// RedoRequestFrom retries the request at the given height. It does not remove the +// peer. +func (pool *BlockPool) RedoRequestFrom(height int64, peerID p2p.ID) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + if requester, ok := pool.requesters[height]; ok { // If we requested this block + if requester.didRequestFrom(peerID) { // From this specific peer + requester.redo(peerID) + } + } +} + +// Deprecated: use RemovePeerAndRedoAllPeerRequests instead. +func (pool *BlockPool) RedoRequest(height int64) p2p.ID { + return pool.RemovePeerAndRedoAllPeerRequests(height) +} + // AddBlock validates that the block comes from the peer it was expected from // and calls the requester to store it. 
//
@@ -260,44 +313,50 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, extCommit *ty
 	defer pool.mtx.Unlock()

 	if extCommit != nil && block.Height != extCommit.Height {
-		return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height)
+		err := fmt.Errorf("block height %d != extCommit height %d", block.Height, extCommit.Height)
+		// Peer sent us an invalid block => remove it.
+		pool.sendError(err, peerID)
+		return err
 	}

 	requester := pool.requesters[block.Height]
 	if requester == nil {
-		pool.Logger.Info(
-			"peer sent us a block we didn't expect",
-			"peer",
-			peerID,
-			"curHeight",
-			pool.height,
-			"blockHeight",
-			block.Height)
-		diff := pool.height - block.Height
-		if diff < 0 {
-			diff *= -1
+		// Because we're issuing second requests for closer blocks, it's possible to
+		// receive a block we've already processed from a second peer. Hence, we
+		// can't punish it. But if the peer sent us a block we clearly didn't
+		// request, we disconnect.
+		if block.Height > pool.height || block.Height < pool.startHeight {
+			err := fmt.Errorf("peer sent us block #%d we didn't expect (current height: %d, start height: %d)",
+				block.Height, pool.height, pool.startHeight)
+			pool.sendError(err, peerID)
+			return err
 		}
-		if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
-			pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
-		}
-		return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height)
+
+		return fmt.Errorf("got an already committed block #%d (possibly from the slow peer %s)", block.Height, peerID)
 	}

-	if requester.setBlock(block, extCommit, peerID) {
-		atomic.AddInt32(&pool.numPending, -1)
-		peer := pool.peers[peerID]
-		if peer != nil {
-			peer.decrPending(blockSize)
-		}
-	} else {
-		err := errors.New("requester is different or block already exists")
+	if !requester.setBlock(block, extCommit, peerID) {
+		err := fmt.Errorf("requested block #%d from %v, not %s", block.Height, requester.requestedFrom(), peerID)
 		pool.sendError(err, peerID)
-		return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height)
+		return err
+	}
+
+	atomic.AddInt32(&pool.numPending, -1)
+	peer := pool.peers[peerID]
+	if peer != nil {
+		peer.decrPending(blockSize)
 	}

 	return nil
 }

+// Height returns the pool's height.
+func (pool *BlockPool) Height() int64 {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+	return pool.height
+}
+
 // MaxPeerHeight returns the highest reported height.
 func (pool *BlockPool) MaxPeerHeight() int64 {
 	pool.mtx.Lock()
@@ -315,9 +374,16 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
 		peer.base = base
 		peer.height = height
 	} else {
+		if pool.isPeerBanned(peerID) {
+			pool.Logger.Debug("Ignoring banned peer", "peer", peerID)
+			return
+		}
 		peer = newBPPeer(pool, peerID, base, height)
 		peer.setLogger(pool.Logger.With("peer", peerID))
 		pool.peers[peerID] = peer
+		// No need to sort because curRate is 0 at start;
+		// just add to the beginning so it's picked first by pickIncrAvailablePeer.
+		pool.sortedPeers = append([]*bpPeer{peer}, pool.sortedPeers...)
 	}

 	if height > pool.maxPeerHeight {
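The rewritten AddBlock in the hunk above replaces the old height-distance heuristic with three explicit outcomes. A condensed, hypothetical restatement of that decision logic with plain types, useful for checking the branches at a glance:

package main

import "fmt"

// classifyBlock condenses the AddBlock branches above: a block with a pending
// requester is accepted; a block ahead of the pool or below the start height
// is unexpected and the sender is punished; anything else is a harmless
// duplicate, most likely from a slower second peer.
func classifyBlock(height, poolHeight, startHeight int64, hasRequester bool) string {
	switch {
	case hasRequester:
		return "accept: hand to the requester"
	case height > poolHeight || height < startHeight:
		return "unexpected: send error and disconnect peer"
	default:
		return "already committed: ignore (likely a slow second peer)"
	}
}

func main() {
	fmt.Println(classifyBlock(120, 100, 1, false)) // unexpected
	fmt.Println(classifyBlock(90, 100, 1, false))  // already committed
	fmt.Println(classifyBlock(100, 100, 1, true))  // accept
}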
@@ -336,7 +402,7 @@ func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
 func (pool *BlockPool) removePeer(peerID p2p.ID) {
 	for _, requester := range pool.requesters {
-		if requester.getPeerID() == peerID {
+		if requester.didRequestFrom(peerID) {
 			requester.redo(peerID)
 		}
 	}
@@ -348,6 +414,12 @@
 		}
 		delete(pool.peers, peerID)
+		for i, p := range pool.sortedPeers {
+			if p.id == peerID {
+				pool.sortedPeers = append(pool.sortedPeers[:i], pool.sortedPeers[i+1:]...)
+				break
+			}
+		}

 		// Find a new peer with the biggest height and update maxPeerHeight if the
 		// peer's height was the biggest.
@@ -368,13 +440,26 @@ func (pool *BlockPool) updateMaxPeerHeight() {
 	pool.maxPeerHeight = max
 }

+func (pool *BlockPool) isPeerBanned(peerID p2p.ID) bool {
+	// TODO: replace with cmttime.Since in future versions.
+	return time.Since(pool.bannedPeers[peerID]) < time.Second*60
+}
+
+func (pool *BlockPool) banPeer(peerID p2p.ID) {
+	pool.Logger.Debug("Banning peer", "peer", peerID)
+	pool.bannedPeers[peerID] = cmttime.Now()
+}
+
 // Pick an available peer with the given height available.
 // If no peers are available, returns nil.
-func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer {
+func (pool *BlockPool) pickIncrAvailablePeer(height int64, excludePeerID p2p.ID) *bpPeer {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()

-	for _, peer := range pool.peers {
+	for _, peer := range pool.sortedPeers {
+		if peer.id == excludePeerID {
+			continue
+		}
 		if peer.didTimeout {
 			pool.removePeer(peer.id)
 			continue
@@ -388,33 +473,33 @@
 		peer.incrPending()
 		return peer
 	}
+
 	return nil
 }

-func (pool *BlockPool) makeNextRequester() {
+// Sort peers by curRate, highest first.
+//
+// CONTRACT: pool.mtx must be locked.
+func (pool *BlockPool) sortPeers() {
+	sort.Slice(pool.sortedPeers, func(i, j int) bool {
+		return pool.sortedPeers[i].curRate > pool.sortedPeers[j].curRate
+	})
+}
+
+func (pool *BlockPool) makeNextRequester(nextHeight int64) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()

-	nextHeight := pool.height + pool.requestersLen()
-	if nextHeight > pool.maxPeerHeight {
-		return
-	}
-
 	request := newBPRequester(pool, nextHeight)
 	pool.requesters[nextHeight] = request
 	atomic.AddInt32(&pool.numPending, 1)

-	err := request.Start()
-	if err != nil {
+	if err := request.Start(); err != nil {
 		request.Logger.Error("Error starting request", "err", err)
 	}
 }

-func (pool *BlockPool) requestersLen() int64 {
-	return int64(len(pool.requesters))
-}
-
 func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
 	if !pool.IsRunning() {
 		return
@@ -437,7 +522,7 @@ func (pool *BlockPool) debug() string {
 	defer pool.mtx.Unlock()

 	str := ""
-	nextHeight := pool.height + pool.requestersLen()
+	nextHeight := pool.height + int64(len(pool.requesters))
 	for h := pool.height; h < nextHeight; h++ {
 		if pool.requesters[h] == nil {
 			str += fmt.Sprintf("H(%v):X ", h)
@@ -454,6 +539,7 @@
 type bpPeer struct {
 	didTimeout bool
+	curRate    int64
 	numPending int32
 	height     int64
 	base       int64
@@ -526,28 +612,42 @@ func (peer *bpPeer) onTimeout() {

 //-------------------------------------
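The ban bookkeeping added to BlockPool in this file is deliberately minimal: a ban is just a timestamp, expiry is checked lazily, and stale entries are pruned opportunistically. A self-contained sketch of the same pattern, with hypothetical types and the 60-second TTL used by isPeerBanned above:

package main

import (
	"fmt"
	"time"
)

// banList tracks temporarily banned peer IDs by the time they were banned.
type banList struct {
	banned map[string]time.Time
	ttl    time.Duration
}

func newBanList(ttl time.Duration) *banList {
	return &banList{banned: make(map[string]time.Time), ttl: ttl}
}

func (b *banList) ban(id string) { b.banned[id] = time.Now() }

// isBanned is true while the ban is fresh. A missing entry yields the zero
// time, so time.Since is huge and the peer is reported as not banned.
func (b *banList) isBanned(id string) bool {
	return time.Since(b.banned[id]) < b.ttl
}

// prune drops expired entries, as removeTimedoutPeers does for bannedPeers.
func (b *banList) prune() {
	for id := range b.banned {
		if !b.isBanned(id) {
			delete(b.banned, id)
		}
	}
}

func main() {
	bl := newBanList(60 * time.Second)
	bl.ban("bad-peer")
	fmt.Println(bl.isBanned("bad-peer"), bl.isBanned("other")) // true false
}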
+// bpRequester requests a block from a peer.
+//
+// If the height is within minBlocksForSingleRequest blocks of the pool's
+// height, it will send an additional request to another peer. This is to
+// avoid a situation where blocksync is stuck because of a single slow peer.
+// Note that it's okay to send a single request when the requested height is
+// far from the pool's height. If the peer is slow, it will time out and be
+// replaced with another peer.
 type bpRequester struct {
 	service.BaseService
-	pool       *BlockPool
-	height     int64
-	gotBlockCh chan struct{}
-	redoCh     chan p2p.ID // redo may receive multiple messages; the peer ID identifies repeats
-	mtx        cmtsync.Mutex
-	peerID     p2p.ID
-	block      *types.Block
-	extCommit  *types.ExtendedCommit
+	pool        *BlockPool
+	height      int64
+	gotBlockCh  chan struct{}
+	redoCh      chan p2p.ID // redo may receive multiple messages; the peer ID identifies repeats
+	newHeightCh chan int64
+
+	mtx          cmtsync.Mutex
+	peerID       p2p.ID
+	secondPeerID p2p.ID // alternative peer to request from (if close to pool's height)
+	gotBlockFrom p2p.ID
+	block        *types.Block
+	extCommit    *types.ExtendedCommit
 }

 func newBPRequester(pool *BlockPool, height int64) *bpRequester {
 	bpr := &bpRequester{
-		pool:       pool,
-		height:     height,
-		gotBlockCh: make(chan struct{}, 1),
-		redoCh:     make(chan p2p.ID, 1),
+		pool:        pool,
+		height:      height,
+		gotBlockCh:  make(chan struct{}, 1),
+		redoCh:      make(chan p2p.ID, 1),
+		newHeightCh: make(chan int64, 1),

-		peerID: "",
-		block:  nil,
+		peerID:       "",
+		secondPeerID: "",
+		block:        nil,
 	}
 	bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr)
 	return bpr
@@ -558,15 +658,21 @@ func (bpr *bpRequester) OnStart() error {
 	return nil
 }

-// Returns true if the peer matches and block doesn't already exist.
+// Returns true if the peer(s) match; a duplicate block from the other peer is not an error.
 func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID p2p.ID) bool {
 	bpr.mtx.Lock()
-	if bpr.block != nil || bpr.peerID != peerID {
+	if bpr.peerID != peerID && bpr.secondPeerID != peerID {
 		bpr.mtx.Unlock()
 		return false
 	}
+	if bpr.block != nil {
+		bpr.mtx.Unlock()
+		return true // getting a block from both peers is not an error
+	}
+
 	bpr.block = block
 	bpr.extCommit = extCommit
+	bpr.gotBlockFrom = peerID
 	bpr.mtx.Unlock()

 	select {
@@ -588,24 +694,55 @@ func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit {
 	return bpr.extCommit
 }

-func (bpr *bpRequester) getPeerID() p2p.ID {
+// Returns the IDs of peers we've requested a block from.
+func (bpr *bpRequester) requestedFrom() []p2p.ID {
 	bpr.mtx.Lock()
 	defer bpr.mtx.Unlock()
-	return bpr.peerID
+	peerIDs := make([]p2p.ID, 0, 2)
+	if bpr.peerID != "" {
+		peerIDs = append(peerIDs, bpr.peerID)
+	}
+	if bpr.secondPeerID != "" {
+		peerIDs = append(peerIDs, bpr.secondPeerID)
+	}
+	return peerIDs
 }

-// This is called from the requestRoutine, upon redo().
-func (bpr *bpRequester) reset() {
+// Returns true if we've requested a block from the given peer.
+func (bpr *bpRequester) didRequestFrom(peerID p2p.ID) bool {
 	bpr.mtx.Lock()
 	defer bpr.mtx.Unlock()
+	return bpr.peerID == peerID || bpr.secondPeerID == peerID
+}

-	if bpr.block != nil {
+// Returns the ID of the peer who sent us the block.
+func (bpr *bpRequester) gotBlockFromPeerID() p2p.ID {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	return bpr.gotBlockFrom
+}
+
+// Removes the block (IF we got it from the given peer) and resets the peer.
+func (bpr *bpRequester) reset(peerID p2p.ID) (removedBlock bool) {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+
+	// Only remove the block if we got it from that peer.
+ if bpr.gotBlockFrom == peerID { + bpr.block = nil + bpr.extCommit = nil + bpr.gotBlockFrom = "" + removedBlock = true atomic.AddInt32(&bpr.pool.numPending, 1) } - bpr.peerID = "" - bpr.block = nil - bpr.extCommit = nil + if bpr.peerID == peerID { + bpr.peerID = "" + } else { + bpr.secondPeerID = "" + } + + return removedBlock } // Tells bpRequester to pick another peer and try again. @@ -618,34 +755,81 @@ func (bpr *bpRequester) redo(peerID p2p.ID) { } } +func (bpr *bpRequester) pickPeerAndSendRequest() { + bpr.mtx.Lock() + secondPeerID := bpr.secondPeerID + bpr.mtx.Unlock() + + var peer *bpPeer +PICK_PEER_LOOP: + for { + if !bpr.IsRunning() || !bpr.pool.IsRunning() { + return + } + peer = bpr.pool.pickIncrAvailablePeer(bpr.height, secondPeerID) + if peer == nil { + bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height) + time.Sleep(requestIntervalMS * time.Millisecond) + continue PICK_PEER_LOOP + } + break PICK_PEER_LOOP + } + bpr.mtx.Lock() + bpr.peerID = peer.id + bpr.mtx.Unlock() + + bpr.pool.sendRequest(bpr.height, peer.id) +} + +// Picks a second peer and sends a request to it. If the second peer is already +// set, does nothing. +func (bpr *bpRequester) pickSecondPeerAndSendRequest() (picked bool) { + bpr.mtx.Lock() + if bpr.secondPeerID != "" { + bpr.mtx.Unlock() + return false + } + peerID := bpr.peerID + bpr.mtx.Unlock() + + secondPeer := bpr.pool.pickIncrAvailablePeer(bpr.height, peerID) + if secondPeer != nil { + bpr.mtx.Lock() + bpr.secondPeerID = secondPeer.id + bpr.mtx.Unlock() + + bpr.pool.sendRequest(bpr.height, secondPeer.id) + return true + } + + return false +} + +// Informs the requester of a new pool's height. +func (bpr *bpRequester) newHeight(height int64) { + select { + case bpr.newHeightCh <- height: + default: + } +} + // Responsible for making more requests as necessary // Returns only when a block is found (e.g. AddBlock() is called) func (bpr *bpRequester) requestRoutine() { + gotBlock := false + OUTER_LOOP: for { - // Pick a peer to send request to. - var peer *bpPeer - PICK_PEER_LOOP: - for { - if !bpr.IsRunning() || !bpr.pool.IsRunning() { - return - } - peer = bpr.pool.pickIncrAvailablePeer(bpr.height) - if peer == nil { - bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height) - time.Sleep(requestIntervalMS * time.Millisecond) - continue PICK_PEER_LOOP - } - break PICK_PEER_LOOP + bpr.pickPeerAndSendRequest() + + poolHeight := bpr.pool.Height() + if bpr.height-poolHeight < minBlocksForSingleRequest { + bpr.pickSecondPeerAndSendRequest() } - bpr.mtx.Lock() - bpr.peerID = peer.id - bpr.mtx.Unlock() - to := time.NewTimer(requestRetrySeconds * time.Second) - // Send request and wait. 
-	bpr.pool.sendRequest(bpr.height, peer.id)
-WAIT_LOOP:
+		retryTimer := time.NewTimer(requestRetrySeconds * time.Second)
+		defer retryTimer.Stop()
+
 		for {
 			select {
 			case <-bpr.pool.Quit():
@@ -655,22 +839,43 @@
 				return
 			case <-bpr.Quit():
 				return
-			case <-to.C:
-				bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
-				// Simulate a redo
-				bpr.reset()
-				continue OUTER_LOOP
+			case <-retryTimer.C:
+				if !gotBlock {
+					bpr.Logger.Debug("Retrying block request(s) after timeout", "height", bpr.height, "peer", bpr.peerID, "secondPeerID", bpr.secondPeerID)
+					bpr.reset(bpr.peerID)
+					bpr.reset(bpr.secondPeerID)
+					continue OUTER_LOOP
+				}
 			case peerID := <-bpr.redoCh:
-				if peerID == bpr.peerID {
-					bpr.reset()
+				if bpr.didRequestFrom(peerID) {
+					removedBlock := bpr.reset(peerID)
+					if removedBlock {
+						gotBlock = false
+					}
+				}
+				// If both peers returned NoBlockResponse or a bad block, reschedule both
+				// requests. If not, wait for the other peer.
+				if len(bpr.requestedFrom()) == 0 {
+					retryTimer.Stop()
 					continue OUTER_LOOP
-				} else {
-					continue WAIT_LOOP
+				}
+			case newHeight := <-bpr.newHeightCh:
+				if !gotBlock && bpr.height-newHeight < minBlocksForSingleRequest {
+					// The operation is a no-op if the second peer is already set. The cost is checking a mutex.
+					//
+					// If the second peer was just set, reset the retryTimer to give the
+					// second peer a chance to respond.
+					if picked := bpr.pickSecondPeerAndSendRequest(); picked {
+						if !retryTimer.Stop() {
+							<-retryTimer.C
+						}
+						retryTimer.Reset(requestRetrySeconds * time.Second)
+					}
 				}
 			case <-bpr.gotBlockCh:
+				gotBlock = true
 				// We got a block!
 				// Continue the for-loop and wait til Quit.
-				continue WAIT_LOOP
 			}
 		}
 	}
diff --git a/blocksync/pool_test.go b/blocksync/pool_test.go
index c5bfab46b5a..0952e6ab69e 100644
--- a/blocksync/pool_test.go
+++ b/blocksync/pool_test.go
@@ -23,6 +23,7 @@ type testPeer struct {
 	base      int64
 	height    int64
 	inputChan chan inputData // make sure each peer's data is sequential
+	malicious bool
 }

 type inputData struct {
@@ -31,6 +32,11 @@
 	request BlockRequest
 }

+// Malicious node parameters
+const MaliciousLie = 5 // How many blocks above its real height the malicious node claims to be
+const BlackholeSize = 3 // How many blocks above its real height the malicious node will not return (missing)
+const MaliciousTestMaximumLength = 5 * time.Minute
+
 func (p testPeer) runInputRoutine() {
 	go func() {
 		for input := range p.inputChan {
@@ -41,18 +47,33 @@
 // Request desired, pretend like we got the block immediately.
func (p testPeer) simulateInput(input inputData) { - block := &types.Block{Header: types.Header{Height: input.request.Height}} + block := &types.Block{Header: types.Header{Height: input.request.Height}, LastCommit: &types.Commit{}} // real blocks have LastCommit extCommit := &types.ExtendedCommit{ Height: input.request.Height, } - _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) + // If this peer is malicious + if p.malicious { + realHeight := p.height - MaliciousLie + // And the requested height is above the real height + if input.request.Height > realHeight { + // Then provide a fake block + block.LastCommit = nil // Fake block, no LastCommit + // or provide no block at all, if we are close to the real height + if input.request.Height <= realHeight+BlackholeSize { + input.pool.RedoRequestFrom(input.request.Height, p.id) + return + } + } + } + err := input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) + require.NoError(input.t, err) // TODO: uncommenting this creates a race which is detected by: // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) } -type testPeers map[p2p.ID]testPeer +type testPeers map[p2p.ID]*testPeer func (ps testPeers) start() { for _, v := range ps { @@ -75,16 +96,18 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { if base > height { base = height } - peers[peerID] = testPeer{peerID, base, height, make(chan inputData, 10)} + peers[peerID] = &testPeer{peerID, base, height, make(chan inputData, 10), false} } return peers } func TestBlockPoolBasic(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) + var ( + start = int64(42) + peers = makePeers(10, start, 1000) + errorsCh = make(chan peerError) + requestsCh = make(chan BlockRequest) + ) pool := NewBlockPool(start, requestsCh, errorsCh) pool.SetLogger(log.TestingLogger()) @@ -141,10 +164,13 @@ func TestBlockPoolBasic(t *testing.T) { } func TestBlockPoolTimeout(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) + var ( + start = int64(42) + peers = makePeers(10, start, 1000) + errorsCh = make(chan peerError) + requestsCh = make(chan BlockRequest) + ) + pool := NewBlockPool(start, requestsCh, errorsCh) pool.SetLogger(log.TestingLogger()) err := pool.Start() @@ -208,7 +234,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { for i := 0; i < 10; i++ { peerID := p2p.ID(fmt.Sprintf("%d", i+1)) height := int64(i + 1) - peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)} + peers[peerID] = &testPeer{peerID, 0, height, make(chan inputData), false} } requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) @@ -243,3 +269,111 @@ func TestBlockPoolRemovePeer(t *testing.T) { assert.EqualValues(t, 0, pool.MaxPeerHeight()) } + +func TestBlockPoolMaliciousNode(t *testing.T) { + // Setup: + // * each peer has blocks 1..N but the malicious peer reports 1..N+5 (block N+1,N+2,N+3 missing, N+4,N+5 fake) + // * The malicious peer is ahead of the network but not by much, so it does not get dropped from the pool + // with a timeout error. (If a peer does not send blocks after 2 seconds, they are disconnected.) 
+ // * The network creates new blocks every second. The malicious peer will also get ahead with another fake block. + // * The pool verifies blocks every half second. This ensures that the pool catches up with the network. + // * When the pool encounters a fake block sent by the malicious peer and has the previous block from a good peer, + // it can prove that the block is fake. The malicious peer gets banned, together with the sender of the previous (valid) block. + // Additional notes: + // * After a minute of ban, the malicious peer is unbanned. If the pool IsCaughtUp() by then and consensus started, + // there is no impact. If blocksync did not catch up yet, the malicious peer can continue its lie until the next ban. + // * The pool has an initial 3 seconds spin-up time before it starts verifying peers. (So peers have a chance to + // connect.) If the initial height is 7 and the block creation is 1/second, verification will start at height 10. + // * Testing with height 7, the main functionality of banning a malicious peer is tested. + // Testing with height 127, a malicious peer can reconnect and the subsequent banning is also tested. + // This takes a couple of minutes to complete, so we don't run it. + const InitialHeight = 7 + peers := testPeers{ + p2p.ID("good"): &testPeer{p2p.ID("good"), 1, InitialHeight, make(chan inputData), false}, + p2p.ID("bad"): &testPeer{p2p.ID("bad"), 1, InitialHeight + MaliciousLie, make(chan inputData), true}, + p2p.ID("good1"): &testPeer{p2p.ID("good1"), 1, InitialHeight, make(chan inputData), false}, + } + errorsCh := make(chan peerError) + requestsCh := make(chan BlockRequest) + + pool := NewBlockPool(1, requestsCh, errorsCh) + pool.SetLogger(log.TestingLogger()) + + err := pool.Start() + if err != nil { + t.Error(err) + } + + t.Cleanup(func() { + if err := pool.Stop(); err != nil { + t.Error(err) + } + }) + + peers.start() + t.Cleanup(func() { peers.stop() }) + + // Simulate blocks created on each peer regularly and update pool max height. 
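The two goroutines that follow drive the test clock: one bumps every peer's height once per second, the other stands in for consensus, verifying the pool's output every 500 ms. The detection rule is compact: with two consecutive blocks peeked, a second block lacking a `LastCommit` cannot vouch for the first, so the suppliers of that height are banned and the requests rescheduled. A compressed, self-contained restatement of that step (the nil-`LastCommit` check is this test's stand-in for full commit verification, not the production validation path):

```go
package main

import "fmt"

// Minimal stand-ins for the test's types: a block counts as "fake" here
// exactly when it carries no LastCommit.
type commit struct{}

type block struct {
	Height     int64
	LastCommit *commit
}

// verifyStep mirrors one iteration of the verification goroutine: peek at
// the two lowest blocks; if the second lacks a LastCommit, ban its
// suppliers and re-request; otherwise the second vouches for the first,
// which is accepted (popped).
func verifyStep(first, second *block, pop func(), banAndRedo func(height int64)) {
	if first == nil || second == nil {
		return // need two consecutive blocks to make progress
	}
	if second.LastCommit == nil {
		banAndRedo(second.Height)
		return
	}
	pop()
}

func main() {
	good := &block{Height: 8, LastCommit: &commit{}}
	fake := &block{Height: 9} // no LastCommit: fabricated
	verifyStep(good, fake,
		func() { fmt.Println("accepted block 8") },
		func(h int64) { fmt.Printf("banned suppliers of height %d, redoing requests\n", h) },
	)
}
```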
+ go func() { + // Introduce each peer + for _, peer := range peers { + pool.SetPeerRange(peer.id, peer.base, peer.height) + } + for { + time.Sleep(1 * time.Second) // Speed of new block creation + for _, peer := range peers { + peer.height += 1 // Network height increases on all peers + pool.SetPeerRange(peer.id, peer.base, peer.height) // Tell the pool that a new height is available + } + } + }() + + // Start a goroutine to verify blocks + go func() { + for { + time.Sleep(500 * time.Millisecond) // Speed of block verification + if !pool.IsRunning() { + return + } + first, second, _ := pool.PeekTwoBlocks() + if first != nil && second != nil { + if second.LastCommit == nil { + // Second block is fake + pool.RemovePeerAndRedoAllPeerRequests(second.Height) + } else { + pool.PopRequest() + } + } + } + }() + + testTicker := time.NewTicker(200 * time.Millisecond) // speed of test execution + t.Cleanup(func() { testTicker.Stop() }) + + bannedOnce := false // true when the malicious peer was banned at least once + startTime := time.Now() + + // Pull from channels + for { + select { + case err := <-errorsCh: + t.Error(err) + case request := <-requestsCh: + // Process request + peers[request.PeerID].inputChan <- inputData{t, pool, request} + case <-testTicker.C: + banned := pool.isPeerBanned("bad") + bannedOnce = bannedOnce || banned // Keep bannedOnce true, even if the malicious peer gets unbanned + caughtUp := pool.IsCaughtUp() + // Success: pool caught up and malicious peer was banned at least once + if caughtUp && bannedOnce { + t.Logf("Pool caught up, malicious peer was banned at least once, start consensus.") + return + } + // Failure: the pool caught up without banning the bad peer at least once + require.False(t, caughtUp, "Network caught up without banning the malicious peer at least once.") + // Failure: the network could not catch up in the allotted time + require.True(t, time.Since(startTime) < MaliciousTestMaximumLength, "Network ran too long, stopping test.") + } + } +} diff --git a/blocksync/reactor.go b/blocksync/reactor.go index b67c2d844c4..745f227663d 100644 --- a/blocksync/reactor.go +++ b/blocksync/reactor.go @@ -3,8 +3,10 @@ package blocksync import ( "fmt" "reflect" + "sync" "time" + "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" bcproto "github.com/cometbft/cometbft/proto/tendermint/blocksync" @@ -51,10 +53,12 @@ type Reactor struct { // immutable initialState sm.State - blockExec *sm.BlockExecutor - store sm.BlockStore - pool *BlockPool - blockSync bool + blockExec *sm.BlockExecutor + store sm.BlockStore + pool *BlockPool + blockSync bool + localAddr crypto.Address + poolRoutineWg sync.WaitGroup requestsCh <-chan BlockRequest errorsCh <-chan peerError @@ -66,19 +70,39 @@ type Reactor struct { // NewReactor returns new reactor instance. func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, - blockSync bool, metrics *Metrics) *Reactor { + blockSync bool, metrics *Metrics, offlineStateSyncHeight int64, +) *Reactor { + return NewReactorWithAddr(state, blockExec, store, blockSync, nil, metrics, offlineStateSyncHeight) +} - if state.LastBlockHeight != store.Height() { - panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, - store.Height())) +// Function added to keep existing API. 
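The constructor that follows reconciles three height sources: the block store, the state store, and an optional offline-state-sync height. The rule as the hunk implements it: an empty block store plus a non-zero `offlineStateSyncHeight` means the stores were bootstrapped offline to that height, so the mismatch check uses that height and block sync resumes one above it (falling back to the genesis initial height on a fresh chain). A small sketch of just that arithmetic, with illustrative names:

```go
package main

import "fmt"

// startHeightFor reproduces the height logic in the constructor below: an
// empty block store with offlineStateSyncHeight > 0 indicates an offline
// bootstrap to that height, so syncing starts at offlineStateSyncHeight+1
// instead of panicking on a state/store height mismatch.
func startHeightFor(storeHeight, offlineStateSyncHeight, lastBlockHeight, initialHeight int64) (int64, error) {
	if storeHeight == 0 {
		storeHeight = offlineStateSyncHeight
	}
	if lastBlockHeight != storeHeight {
		return 0, fmt.Errorf("state (%d) and store (%d) height mismatch", lastBlockHeight, storeHeight)
	}
	start := storeHeight + 1
	if start == 1 {
		start = initialHeight // fresh chain: begin at the genesis height
	}
	return start, nil
}

func main() {
	// Offline state sync bootstrapped both stores to height 100.
	h, err := startHeightFor(0, 100, 100, 1)
	fmt.Println(h, err) // 101 <nil>

	// Fresh node, nothing stored, genesis initial height 5.
	h, err = startHeightFor(0, 0, 0, 5)
	fmt.Println(h, err) // 5 <nil>
}
```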
+func NewReactorWithAddr(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, + blockSync bool, localAddr crypto.Address, metrics *Metrics, offlineStateSyncHeight int64, +) *Reactor { + + storeHeight := store.Height() + if storeHeight == 0 { + // If state sync was performed offline and the stores were bootstrapped to height H + // the state store's lastHeight will be H while blockstore's Height and Base are still 0 + // 1. This scenario should not lead to a panic in this case, which is indicated by + // having a OfflineStateSyncHeight > 0 + // 2. We need to instruct the blocksync reactor to start fetching blocks from H+1 + // instead of 0. + storeHeight = offlineStateSyncHeight + } + if state.LastBlockHeight != storeHeight { + panic(fmt.Sprintf("state (%v) and store (%v) height mismatch, stores were left in an inconsistent state", state.LastBlockHeight, + storeHeight)) } - requestsCh := make(chan BlockRequest, maxTotalRequesters) + // It's okay to block since sendRequest is called from a separate goroutine + // (bpRequester#requestRoutine; 1 per each peer). + requestsCh := make(chan BlockRequest) const capacity = 1000 // must be bigger than peers count errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock - startHeight := store.Height() + 1 + startHeight := storeHeight + 1 if startHeight == 1 { startHeight = state.InitialHeight } @@ -90,6 +114,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS store: store, pool: pool, blockSync: blockSync, + localAddr: localAddr, requestsCh: requestsCh, errorsCh: errorsCh, metrics: metrics, @@ -111,7 +136,11 @@ func (bcR *Reactor) OnStart() error { if err != nil { return err } - go bcR.poolRoutine(false) + bcR.poolRoutineWg.Add(1) + go func() { + defer bcR.poolRoutineWg.Done() + bcR.poolRoutine(false) + }() } return nil } @@ -126,7 +155,11 @@ func (bcR *Reactor) SwitchToBlockSync(state sm.State) error { if err != nil { return err } - go bcR.poolRoutine(true) + bcR.poolRoutineWg.Add(1) + go func() { + defer bcR.poolRoutineWg.Done() + bcR.poolRoutine(true) + }() return nil } @@ -136,6 +169,7 @@ func (bcR *Reactor) OnStop() { if err := bcR.pool.Stop(); err != nil { bcR.Logger.Error("Error stopping pool", "err", err) } + bcR.poolRoutineWg.Wait() } } @@ -169,15 +203,13 @@ func (bcR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements Reactor by removing peer from the pool. -func (bcR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (bcR *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { bcR.pool.RemovePeer(peer.ID()) } // respondToPeer loads a block and sends it to the requesting peer, // if we have it. Otherwise, we'll respond saying we don't have it. -func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, - src p2p.Peer) (queued bool) { - +func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { block := bcR.store.LoadBlock(msg.Height) if block == nil { bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) @@ -217,7 +249,7 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, } // Receive implements Reactor by handling 4 types of messages (look below). 
-func (bcR *Reactor) Receive(e p2p.Envelope) { +func (bcR *Reactor) Receive(e p2p.Envelope) { //nolint: dupl // recreated in a test if err := ValidateMsg(e.Message); err != nil { bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) bcR.Switch.StopPeerForError(e.Src, err) @@ -232,7 +264,8 @@ func (bcR *Reactor) Receive(e p2p.Envelope) { case *bcproto.BlockResponse: bi, err := types.BlockFromProto(msg.Block) if err != nil { - bcR.Logger.Error("Block content is invalid", "err", err) + bcR.Logger.Error("Peer sent us invalid block", "peer", e.Src, "msg", e.Message, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) return } var extCommit *types.ExtendedCommit @@ -243,12 +276,13 @@ func (bcR *Reactor) Receive(e p2p.Envelope) { bcR.Logger.Error("failed to convert extended commit from proto", "peer", e.Src, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) return } } if err := bcR.pool.AddBlock(e.Src.ID(), bi, extCommit, msg.Block.Size()); err != nil { - bcR.Logger.Error("failed to add block", "err", err) + bcR.Logger.Error("failed to add block", "peer", e.Src, "err", err) } case *bcproto.StatusRequest: // Send peer our state. @@ -264,11 +298,21 @@ func (bcR *Reactor) Receive(e p2p.Envelope) { bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) case *bcproto.NoBlockResponse: bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) + bcR.pool.RedoRequestFrom(msg.Height, e.Src.ID()) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } } +func (bcR *Reactor) localNodeBlocksTheChain(state sm.State) bool { + _, val := state.Validators.GetByAddress(bcR.localAddr) + if val == nil { + return false + } + total := state.Validators.TotalVotingPower() + return val.VotingPower >= total/3 +} + // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! func (bcR *Reactor) poolRoutine(stateSynced bool) { @@ -377,7 +421,7 @@ FOR_LOOP: ) continue FOR_LOOP } - if bcR.pool.IsCaughtUp() { + if bcR.pool.IsCaughtUp() || bcR.localNodeBlocksTheChain(state) { bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) if err := bcR.pool.Stop(); err != nil { bcR.Logger.Error("Error stopping pool", "err", err) @@ -424,11 +468,14 @@ FOR_LOOP: // Panicking because this is an obvious bug in the block pool, which is totally under our control panic(fmt.Errorf("heights of first and second block are not consecutive; expected %d, got %d", state.LastBlockHeight, first.Height)) } - if extCommit == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 - panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) - } + // Before priming didProcessCh for another check on the next + // iteration, break the loop if the BlockPool or the Reactor itself + // has quit. This avoids case ambiguity of the outer select when two + // channels are ready. + if !bcR.IsRunning() || !bcR.pool.IsRunning() { + break FOR_LOOP + } // Try again quickly next loop. 
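The `localNodeBlocksTheChain` helper added above lets a validator holding at least a third of the voting power switch to consensus even before `IsCaughtUp()`: with that much power offline, the remaining validators cannot reach the >2/3 prevote threshold, so no new blocks can be committed and there is nothing left to sync. A sketch of the arithmetic (note the hunk's integer division rounds `total/3` down, so the check can trigger with marginally less than a strict third):

```go
package main

import "fmt"

// blocksTheChain mirrors the check in localNodeBlocksTheChain: a validator
// whose power is at least total/3 (integer division, as in the hunk) is
// assumed to block commits, because the others cannot muster the >2/3
// prevotes required without it.
func blocksTheChain(power, total int64) bool {
	return power >= total/3
}

func main() {
	fmt.Println(blocksTheChain(34, 100)) // true: others hold 66, short of the >66 needed
	fmt.Println(blocksTheChain(10, 100)) // false: others hold 90, plenty to commit
}
```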
didProcessCh <- struct{}{} @@ -453,31 +500,33 @@ FOR_LOOP: // validate the block before we persist it err = bcR.blockExec.ValidateBlock(state, first) } - if err == nil { + presentExtCommit := extCommit != nil + extensionsEnabled := state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) + if presentExtCommit != extensionsEnabled { + err = fmt.Errorf("non-nil extended commit must be received iff vote extensions are enabled for its height "+ + "(height %d, non-nil extended commit %t, extensions enabled %t)", + first.Height, presentExtCommit, extensionsEnabled, + ) + } + if err == nil && extensionsEnabled { // if vote extensions were required at this height, ensure they exist. - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - err = extCommit.EnsureExtensions(true) - } else { - if extCommit != nil { - err = fmt.Errorf("received non-nil extCommit for height %d (extensions disabled)", first.Height) - } - } + err = extCommit.EnsureExtensions(true) } if err != nil { bcR.Logger.Error("Error in validation", "err", err) - peerID := bcR.pool.RedoRequest(first.Height) + peerID := bcR.pool.RemovePeerAndRedoAllPeerRequests(first.Height) peer := bcR.Switch.Peers().Get(peerID) if peer != nil { // NOTE: we've already removed the peer's request, but we // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer, fmt.Errorf("Reactor validation error: %v", err)) + bcR.Switch.StopPeerForError(peer, ErrReactorValidation{Err: err}) } - peerID2 := bcR.pool.RedoRequest(second.Height) + peerID2 := bcR.pool.RemovePeerAndRedoAllPeerRequests(second.Height) peer2 := bcR.Switch.Peers().Get(peerID2) if peer2 != nil && peer2 != peer { // NOTE: we've already removed the peer's request, but we // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer2, fmt.Errorf("Reactor validation error: %v", err)) + bcR.Switch.StopPeerForError(peer2, ErrReactorValidation{Err: err}) } continue FOR_LOOP } @@ -485,7 +534,7 @@ FOR_LOOP: bcR.pool.PopRequest() // TODO: batch saves so we dont persist to disk every block - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { + if extensionsEnabled { bcR.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit) } else { // We use LastCommit here instead of extCommit. extCommit is not @@ -497,7 +546,7 @@ FOR_LOOP: // TODO: same thing for app - but we would need a way to // get the hash without persisting the state - state, err = bcR.blockExec.ApplyBlock(state, firstID, first) + state, err = bcR.blockExec.ApplyVerifiedBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? 
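The validation rewrite above collapses two one-sided checks into a single biconditional: an extended commit must be present exactly when vote extensions are enabled at the block's height, and both mismatches (extra and missing) now route through `RemovePeerAndRedoAllPeerRequests`. The predicate reduces to a boolean equality test, as this standalone sketch shows:

```go
package main

import "fmt"

// validExtCommit is the biconditional from the hunk: a non-nil extended
// commit must be received iff vote extensions are enabled for the height.
func validExtCommit(extCommitPresent, extensionsEnabled bool) bool {
	return extCommitPresent == extensionsEnabled
}

func main() {
	fmt.Println(validExtCommit(true, true))   // extensions on, commit sent: ok
	fmt.Println(validExtCommit(false, false)) // extensions off, none sent: ok
	fmt.Println(validExtCommit(true, false))  // extra ext commit: reject
	fmt.Println(validExtCommit(false, true))  // missing ext commit: reject
}
```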
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -516,6 +565,8 @@ FOR_LOOP: case <-bcR.Quit(): break FOR_LOOP + case <-bcR.pool.Quit(): + break FOR_LOOP } } } diff --git a/blocksync/reactor_test.go b/blocksync/reactor_test.go index 8363e9f5941..acfefff6ab8 100644 --- a/blocksync/reactor_test.go +++ b/blocksync/reactor_test.go @@ -3,10 +3,13 @@ package blocksync import ( "fmt" "os" + "reflect" "sort" "testing" "time" + bcproto "github.com/cometbft/cometbft/proto/tendermint/blocksync" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -53,7 +56,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G } type ReactorPair struct { - reactor *Reactor + reactor *ByzantineReactor app proxy.AppConns } @@ -63,10 +66,15 @@ func newReactor( genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64, + incorrectData ...int64, ) ReactorPair { if len(privVals) != 1 { panic("only support one validator") } + var incorrectBlock int64 = 0 + if len(incorrectData) > 0 { + incorrectBlock = incorrectData[0] + } app := abci.NewBaseApplication() cc := proxy.NewLocalClientCreator(app) @@ -103,7 +111,7 @@ func newReactor( // Make the Reactor itself. // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. - fastSync := true + blockSync := true db := dbm.NewMemDB() stateStore = sm.NewStore(db, sm.StoreOptions{ DiscardABCIResponses: false, @@ -117,8 +125,17 @@ func newReactor( // The commit we are building for the current height. seenExtCommit := &types.ExtendedCommit{} + pubKey, err := privVals[0].GetPubKey() + if err != nil { + panic(err) + } + addr := pubKey.Address() + idx, _ := state.Validators.GetByAddress(addr) + // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { + voteExtensionIsEnabled := genDoc.ConsensusParams.ABCI.VoteExtensionsEnabled(blockHeight) + lastExtCommit := seenExtCommit.Clone() thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address) @@ -128,12 +145,6 @@ func newReactor( blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} // Simulate a commit for the current height - pubKey, err := privVals[0].GetPubKey() - if err != nil { - panic(err) - } - addr := pubKey.Address() - idx, _ := state.Validators.GetByAddress(addr) vote, err := types.MakeVote( privVals[0], thisBlock.Header.ChainID, @@ -159,10 +170,15 @@ func newReactor( panic(fmt.Errorf("error apply block: %w", err)) } - blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit) + saveCorrectVoteExtensions := blockHeight != incorrectBlock + if saveCorrectVoteExtensions == voteExtensionIsEnabled { + blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit) + } else { + blockStore.SaveBlock(thisBlock, thisParts, seenExtCommit.ToCommit()) + } } - bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics()) + bcReactor := NewByzantineReactor(incorrectBlock, NewReactor(state.Copy(), blockExec, blockStore, blockSync, NopMetrics(), 0)) bcReactor.SetLogger(logger.With("module", "blocksync")) return ReactorPair{bcReactor, proxyApp} @@ -378,3 +394,185 @@ func TestCheckSwitchToConsensusLastHeightZero(t *testing.T) { assert.GreaterOrEqual(t, r.reactor.store.Height(), maxBlockHeight-maxDiff) } } + +func ExtendedCommitNetworkHelper(t *testing.T, 
maxBlockHeight int64, enableVoteExtensionAt int64, invalidBlockHeightAt int64) { + config = test.ResetTestRoot("blocksync_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(1, false, 30) + genDoc.ConsensusParams.ABCI.VoteExtensionsEnableHeight = enableVoteExtensionAt + + reactorPairs := make([]ReactorPair, 1, 2) + reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + reactorPairs[0].reactor.switchToConsensusMs = 50 + defer func() { + for _, r := range reactorPairs { + err := r.reactor.Stop() + require.NoError(t, err) + err = r.app.Stop() + require.NoError(t, err) + } + }() + + reactorPairs = append(reactorPairs, newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight, invalidBlockHeightAt)) + + var switches []*p2p.Switch + for _, r := range reactorPairs { + switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKSYNC", r.reactor) + return s + }, p2p.Connect2Switches)...) + } + + time.Sleep(60 * time.Millisecond) + + // Connect both switches + p2p.Connect2Switches(switches, 0, 1) + + startTime := time.Now() + for { + time.Sleep(20 * time.Millisecond) + // The reactor can never catch up, because at one point it disconnects. + require.False(t, reactorPairs[0].reactor.pool.IsCaughtUp(), "node caught up when it should not have") + // After 5 seconds, the test should have executed. + if time.Since(startTime) > 5*time.Second { + assert.Equal(t, 0, reactorPairs[0].reactor.Switch.Peers().Size(), "node should have disconnected but didn't") + assert.Equal(t, 0, reactorPairs[1].reactor.Switch.Peers().Size(), "node should have disconnected but didn't") + break + } + } +} + +// TestCheckExtendedCommitExtra tests when VoteExtension is disabled but an ExtendedVote is present in the block. +func TestCheckExtendedCommitExtra(t *testing.T) { + const maxBlockHeight = 10 + const enableVoteExtension = 5 + const invalidBlockHeight = 3 + + ExtendedCommitNetworkHelper(t, maxBlockHeight, enableVoteExtension, invalidBlockHeight) +} + +// TestCheckExtendedCommitMissing tests when VoteExtension is enabled but the ExtendedVote is missing from the block. +func TestCheckExtendedCommitMissing(t *testing.T) { + const maxBlockHeight = 10 + const enableVoteExtension = 5 + const invalidBlockHeight = 8 + + ExtendedCommitNetworkHelper(t, maxBlockHeight, enableVoteExtension, invalidBlockHeight) +} + +// ByzantineReactor is a blockstore reactor implementation where a corrupted block can be sent to a peer. +// The corruption is that the block contains extended commit signatures when vote extensions are disabled or +// it has no extended commit signatures while vote extensions are enabled. +// If the corrupted block height is set to 0, the reactor behaves as normal. +type ByzantineReactor struct { + *Reactor + corruptedBlock int64 +} + +func NewByzantineReactor(invalidBlock int64, conR *Reactor) *ByzantineReactor { + return &ByzantineReactor{ + Reactor: conR, + corruptedBlock: invalidBlock, + } +} + +// respondToPeer (overridden method) loads a block and sends it to the requesting peer, +// if we have it. Otherwise, we'll respond saying we don't have it. +// Byzantine modification: if corruptedBlock is set, send the wrong Block. 
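The override that follows corrupts exactly one height: it withholds the extended commit where one is required, or attaches one where it is forbidden. Its guard, `voteExtensionEnabled && !incorrectBlock || !voteExtensionEnabled && incorrectBlock`, is just XOR over the two flags; a truth-table sketch:

```go
package main

import "fmt"

// sendExtCommit reproduces the guard in the Byzantine respondToPeer below:
// normally an extended commit is attached iff vote extensions are enabled,
// and at the one corrupted height that decision is inverted -- i.e. XOR,
// written in Go as inequality of booleans.
func sendExtCommit(voteExtensionEnabled, corruptedHeight bool) bool {
	return voteExtensionEnabled != corruptedHeight
}

func main() {
	fmt.Println(sendExtCommit(true, false))  // honest, extensions on:  attach
	fmt.Println(sendExtCommit(false, false)) // honest, extensions off: omit
	fmt.Println(sendExtCommit(true, true))   // corrupt: omit when required
	fmt.Println(sendExtCommit(false, true))  // corrupt: attach when forbidden
}
```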
+func (bcR *ByzantineReactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { + block := bcR.store.LoadBlock(msg.Height) + if block == nil { + bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) + return src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, + }) + } + + state, err := bcR.blockExec.Store().Load() + if err != nil { + bcR.Logger.Error("loading state", "err", err) + return false + } + var extCommit *types.ExtendedCommit + voteExtensionEnabled := state.ConsensusParams.ABCI.VoteExtensionsEnabled(msg.Height) + incorrectBlock := bcR.corruptedBlock == msg.Height + if voteExtensionEnabled && !incorrectBlock || !voteExtensionEnabled && incorrectBlock { + extCommit = bcR.store.LoadBlockExtendedCommit(msg.Height) + if extCommit == nil { + bcR.Logger.Error("found block in store with no extended commit", "block", block) + return false + } + } + + bl, err := block.ToProto() + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return false + } + + return src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.BlockResponse{ + Block: bl, + ExtCommit: extCommit.ToProto(), + }, + }) +} + +// Receive implements Reactor by handling 4 types of messages (look below). +// Copied unchanged from reactor.go so the correct respondToPeer is called. +func (bcR *ByzantineReactor) Receive(e p2p.Envelope) { //nolint: dupl + if err := ValidateMsg(e.Message); err != nil { + bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) + return + } + + bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message) + + switch msg := e.Message.(type) { + case *bcproto.BlockRequest: + bcR.respondToPeer(msg, e.Src) + case *bcproto.BlockResponse: + bi, err := types.BlockFromProto(msg.Block) + if err != nil { + bcR.Logger.Error("Peer sent us invalid block", "peer", e.Src, "msg", e.Message, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) + return + } + var extCommit *types.ExtendedCommit + if msg.ExtCommit != nil { + var err error + extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit) + if err != nil { + bcR.Logger.Error("failed to convert extended commit from proto", + "peer", e.Src, + "err", err) + bcR.Switch.StopPeerForError(e.Src, err) + return + } + } + + if err := bcR.pool.AddBlock(e.Src.ID(), bi, extCommit, msg.Block.Size()); err != nil { + bcR.Logger.Error("failed to add block", "peer", e.Src, "err", err) + } + case *bcproto.StatusRequest: + // Send peer our state. + e.Src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusResponse{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }, + }) + case *bcproto.StatusResponse: + // Got a peer status. Unverified. 
+ bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) + case *bcproto.NoBlockResponse: + bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) + bcR.pool.RedoRequestFrom(msg.Height, e.Src.ID()) + default: + bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } +} diff --git a/buf.yaml b/buf.yaml deleted file mode 100644 index 5f837667856..00000000000 --- a/buf.yaml +++ /dev/null @@ -1,13 +0,0 @@ -build: - roots: - - proto -lint: - use: - - BASIC - - FILE_LOWER_SNAKE_CASE - - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/cmd/cometbft/commands/compact.go b/cmd/cometbft/commands/compact.go index a72cd69f1ab..327e1dbca68 100644 --- a/cmd/cometbft/commands/compact.go +++ b/cmd/cometbft/commands/compact.go @@ -14,8 +14,9 @@ import ( ) var CompactGoLevelDBCmd = &cobra.Command{ - Use: "experimental-compact-goleveldb", - Short: "force compacts the CometBFT storage engine (only GoLevelDB supported)", + Use: "experimental-compact-goleveldb", + Aliases: []string{"experimental_compact_goleveldb"}, + Short: "force compacts the CometBFT storage engine (only GoLevelDB supported)", Long: ` This is a temporary utility command that performs a force compaction on the state and blockstores to reduce disk space for a pruning node. This should only be run diff --git a/cmd/cometbft/commands/debug/io.go b/cmd/cometbft/commands/debug/io.go index 01a14ea710f..0a4f43318ef 100644 --- a/cmd/cometbft/commands/debug/io.go +++ b/cmd/cometbft/commands/debug/io.go @@ -110,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, 0o600) } diff --git a/cmd/cometbft/commands/debug/kill.go b/cmd/cometbft/commands/debug/kill.go index 3a1c993bcdc..90d84cf9eb7 100644 --- a/cmd/cometbft/commands/debug/kill.go +++ b/cmd/cometbft/commands/debug/kill.go @@ -32,8 +32,8 @@ $ cometbft debug 34255 /path/to/cmt-debug.zip`, RunE: killCmdHandler, } -func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) +func killCmdHandler(_ *cobra.Command, args []string) error { + pid, err := strconv.Atoi(args[0]) if err != nil { return err } @@ -100,7 +100,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the CometBFT process to a file // // NOTE: This will only work on UNIX systems. @@ -123,7 +123,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the CometBFT process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. 
- p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill CometBFT process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/cometbft/commands/debug/util.go b/cmd/cometbft/commands/debug/util.go index 0972a03a1da..1393b8da417 100644 --- a/cmd/cometbft/commands/debug/util.go +++ b/cmd/cometbft/commands/debug/util.go @@ -79,5 +79,5 @@ func dumpProfile(dir, addr, profile string, debug int) error { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, 0o600) } diff --git a/cmd/cometbft/commands/gen_node_key.go b/cmd/cometbft/commands/gen_node_key.go index 952e20fe610..7954257c9e6 100644 --- a/cmd/cometbft/commands/gen_node_key.go +++ b/cmd/cometbft/commands/gen_node_key.go @@ -15,11 +15,10 @@ var GenNodeKeyCmd = &cobra.Command{ Use: "gen-node-key", Aliases: []string{"gen_node_key"}, Short: "Generate a node key for this node and print its ID", - PreRun: deprecateSnakeCase, RunE: genNodeKey, } -func genNodeKey(cmd *cobra.Command, args []string) error { +func genNodeKey(*cobra.Command, []string) error { nodeKeyFile := config.NodeKeyFile() if cmtos.FileExists(nodeKeyFile) { return fmt.Errorf("node key at %s already exists", nodeKeyFile) diff --git a/cmd/cometbft/commands/gen_validator.go b/cmd/cometbft/commands/gen_validator.go index d0792306ca2..072b26576dd 100644 --- a/cmd/cometbft/commands/gen_validator.go +++ b/cmd/cometbft/commands/gen_validator.go @@ -15,11 +15,10 @@ var GenValidatorCmd = &cobra.Command{ Use: "gen-validator", Aliases: []string{"gen_validator"}, Short: "Generate new validator keypair", - PreRun: deprecateSnakeCase, Run: genValidator, } -func genValidator(cmd *cobra.Command, args []string) { +func genValidator(*cobra.Command, []string) { pv := privval.GenFilePV("", "") jsbz, err := cmtjson.Marshal(pv) if err != nil { diff --git a/cmd/cometbft/commands/init.go b/cmd/cometbft/commands/init.go index af7f60e6638..8bb572d3303 100644 --- a/cmd/cometbft/commands/init.go +++ b/cmd/cometbft/commands/init.go @@ -21,7 +21,7 @@ var InitFilesCmd = &cobra.Command{ RunE: initFiles, } -func initFiles(cmd *cobra.Command, args []string) error { +func initFiles(*cobra.Command, []string) error { return initFilesWithConfig(config) } diff --git a/cmd/cometbft/commands/inspect.go b/cmd/cometbft/commands/inspect.go index d8ccecf04ef..2d4c5948094 100644 --- a/cmd/cometbft/commands/inspect.go +++ b/cmd/cometbft/commands/inspect.go @@ -44,7 +44,7 @@ func init() { String("db-dir", config.DBPath, "database directory") } -func runInspect(cmd *cobra.Command, args []string) error { +func runInspect(cmd *cobra.Command, _ []string) error { ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() @@ -77,11 +77,8 @@ func runInspect(cmd *cobra.Command, args []string) error { if err != nil { return err } - ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger) + ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer) logger.Info("starting inspect server") - if err := ins.Run(ctx); err != nil { - return err - } - return nil + return ins.Run(ctx) } diff --git a/cmd/cometbft/commands/light.go b/cmd/cometbft/commands/light.go index 073dbc6ff10..490075f486b 100644 --- a/cmd/cometbft/commands/light.go +++ b/cmd/cometbft/commands/light.go @@ -100,7 +100,7 @@ 
func init() { ) } -func runProxy(cmd *cobra.Command, args []string) error { +func runProxy(_ *cobra.Command, args []string) error { // Initialize logger. logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) var option log.Option diff --git a/cmd/cometbft/commands/probe_upnp.go b/cmd/cometbft/commands/probe_upnp.go deleted file mode 100644 index cf22bb5ae2b..00000000000 --- a/cmd/cometbft/commands/probe_upnp.go +++ /dev/null @@ -1,34 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - cmtjson "github.com/cometbft/cometbft/libs/json" - "github.com/cometbft/cometbft/p2p/upnp" -) - -// ProbeUpnpCmd adds capabilities to test the UPnP functionality. -var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Aliases: []string{"probe_upnp"}, - Short: "Test UPnP functionality", - RunE: probeUpnp, - PreRun: deprecateSnakeCase, -} - -func probeUpnp(cmd *cobra.Command, args []string) error { - capabilities, err := upnp.Probe(logger) - if err != nil { - fmt.Println("Probe failed: ", err) - } else { - fmt.Println("Probe success!") - jsonBytes, err := cmtjson.Marshal(capabilities) - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - } - return nil -} diff --git a/cmd/cometbft/commands/reindex_event.go b/cmd/cometbft/commands/reindex_event.go index 958502d718e..e59e60bd30e 100644 --- a/cmd/cometbft/commands/reindex_event.go +++ b/cmd/cometbft/commands/reindex_event.go @@ -32,8 +32,9 @@ var ( // ReIndexEventCmd constructs a command to re-index events in a block height interval. var ReIndexEventCmd = &cobra.Command{ - Use: "reindex-event", - Short: "reindex events to the event store backends", + Use: "reindex-event", + Aliases: []string{"reindex_event"}, + Short: "reindex events to the event store backends", Long: ` reindex-event is an offline tooling to re-index block and tx events to the eventsinks, you can run this command when the event store backend dropped/disconnected or you want to diff --git a/cmd/cometbft/commands/replay.go b/cmd/cometbft/commands/replay.go index ceb96f873d1..93c3c179651 100644 --- a/cmd/cometbft/commands/replay.go +++ b/cmd/cometbft/commands/replay.go @@ -24,5 +24,4 @@ var ReplayConsoleCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) }, - PreRun: deprecateSnakeCase, } diff --git a/cmd/cometbft/commands/reset.go b/cmd/cometbft/commands/reset.go index 4eadbad91a7..bfd11821046 100644 --- a/cmd/cometbft/commands/reset.go +++ b/cmd/cometbft/commands/reset.go @@ -18,16 +18,15 @@ var ResetAllCmd = &cobra.Command{ Aliases: []string{"unsafe_reset_all"}, Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", RunE: resetAllCmd, - PreRun: deprecateSnakeCase, } var keepAddrBook bool // ResetStateCmd removes the database of the specified CometBFT core instance. var ResetStateCmd = &cobra.Command{ - Use: "reset-state", - Short: "Remove all the data and WAL", - PreRun: deprecateSnakeCase, + Use: "reset-state", + Aliases: []string{"reset_state"}, + Short: "Remove all the data and WAL", RunE: func(cmd *cobra.Command, args []string) (err error) { config, err = ParseConfig(cmd) if err != nil { @@ -47,13 +46,12 @@ var ResetPrivValidatorCmd = &cobra.Command{ Use: "unsafe-reset-priv-validator", Aliases: []string{"unsafe_reset_priv_validator"}, Short: "(unsafe) Reset this node's validator to genesis state", - PreRun: deprecateSnakeCase, RunE: resetPrivValidator, } // XXX: this is totally unsafe. // it's only suitable for testnets. 
-func resetAllCmd(cmd *cobra.Command, args []string) (err error) { +func resetAllCmd(cmd *cobra.Command, _ []string) (err error) { config, err = ParseConfig(cmd) if err != nil { return err @@ -70,7 +68,7 @@ func resetAllCmd(cmd *cobra.Command, args []string) (err error) { // XXX: this is totally unsafe. // it's only suitable for testnets. -func resetPrivValidator(cmd *cobra.Command, args []string) (err error) { +func resetPrivValidator(cmd *cobra.Command, _ []string) (err error) { config, err = ParseConfig(cmd) if err != nil { return err @@ -94,7 +92,7 @@ func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) } - if err := cmtos.EnsureDir(dbDir, 0700); err != nil { + if err := cmtos.EnsureDir(dbDir, 0o700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } @@ -151,7 +149,7 @@ func resetState(dbDir string, logger log.Logger) error { } } - if err := cmtos.EnsureDir(dbDir, 0700); err != nil { + if err := cmtos.EnsureDir(dbDir, 0o700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } return nil diff --git a/cmd/cometbft/commands/root.go b/cmd/cometbft/commands/root.go index 1d5fce3f3ae..c21b415758d 100644 --- a/cmd/cometbft/commands/root.go +++ b/cmd/cometbft/commands/root.go @@ -3,7 +3,6 @@ package commands import ( "fmt" "os" - "strings" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -40,7 +39,7 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { if os.Getenv("CMTHOME") != "" { home = os.Getenv("CMTHOME") } else if os.Getenv("TMHOME") != "" { - //XXX: Deprecated. + // XXX: Deprecated. home = os.Getenv("TMHOME") logger.Error("Deprecated environment variable TMHOME identified. CMTHOME should be used instead.") } else { @@ -96,10 +95,3 @@ var RootCmd = &cobra.Command{ return nil }, } - -// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { - if strings.Contains(cmd.CalledAs(), "_") { - fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") - } -} diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go index 40909d86eb4..5213d940c84 100644 --- a/cmd/cometbft/commands/root_test.go +++ b/cmd/cometbft/commands/root_test.go @@ -17,30 +17,12 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" ) -var ( - defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") -) - // clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("CMTHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("CMT_HOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TMHOME"); err != nil { - //XXX: Deprecated. - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - //XXX: Deprecated. 
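A pattern running through these command files: the temporary `deprecateSnakeCase` PreRun hook is deleted, and backward compatibility is instead carried by cobra's `Aliases`, so a legacy spelling like `reset_state` keeps resolving to `reset-state` without printing a deprecation notice. A minimal standalone sketch of the mechanism (the command here is illustrative, not copied from this diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	root.AddCommand(&cobra.Command{
		Use:     "reset-state",           // canonical hyphen-case name
		Aliases: []string{"reset_state"}, // legacy snake_case still resolves
		Short:   "Remove all the data and WAL",
		RunE: func(*cobra.Command, []string) error {
			fmt.Println("reset-state invoked")
			return nil
		},
	})
	// Both spellings dispatch to the same handler.
	root.SetArgs([]string{"reset_state"})
	_ = root.Execute()
}
```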
- panic(err) - } +func clearConfig(t *testing.T, dir string) { + os.Clearenv() + err := os.RemoveAll(dir) + require.NoError(t, err) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() config = cfg.DefaultConfig() } @@ -58,11 +40,11 @@ func testRootCmd() *cobra.Command { return rootCmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(defaultRoot) +func testSetup(t *testing.T, root string, args []string, env map[string]string) error { + clearConfig(t, root) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env args = append([]string{rootCmd.Use}, args...) @@ -70,22 +52,27 @@ func testSetup(rootDir string, args []string, env map[string]string) error { } func TestRootHome(t *testing.T) { - newRoot := filepath.Join(defaultRoot, "something-else") + tmpDir := os.TempDir() + root := filepath.Join(tmpDir, "adir") + newRoot := filepath.Join(tmpDir, "something-else") + defer clearConfig(t, root) + defer clearConfig(t, newRoot) + cases := []struct { args []string env map[string]string root string }{ - {nil, nil, defaultRoot}, + {nil, nil, root}, {[]string{"--home", newRoot}, nil, newRoot}, - {nil, map[string]string{"TMHOME": newRoot}, newRoot}, //XXX: Deprecated. + {nil, map[string]string{"TMHOME": newRoot}, newRoot}, // XXX: Deprecated. {nil, map[string]string{"CMTHOME": newRoot}, newRoot}, } for i, tc := range cases { - idxString := strconv.Itoa(i) + idxString := "idx: " + strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) @@ -96,7 +83,6 @@ func TestRootHome(t *testing.T) { } func TestRootFlagsEnv(t *testing.T) { - // defaults defaults := cfg.DefaultConfig() defaultLogLvl := defaults.LogLevel @@ -118,8 +104,10 @@ func TestRootFlagsEnv(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) + root := filepath.Join(os.TempDir(), "adir2_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) @@ -127,7 +115,6 @@ func TestRootFlagsEnv(t *testing.T) { } func TestRootConfig(t *testing.T) { - // write non-default config nonDefaultLogLvl := "abc:debug" cvals := map[string]string{ @@ -148,11 +135,12 @@ func TestRootConfig(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - + root := filepath.Join(os.TempDir(), "adir3_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") - err := cmtos.EnsureDir(configFilePath, 0700) + configFilePath := filepath.Join(root, "config") + err := cmtos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) // write the non-defaults to a different path @@ -161,7 +149,7 @@ func TestRootConfig(t *testing.T) { require.Nil(t, err) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env tc.args = append([]string{rootCmd.Use}, tc.args...) 
@@ -180,5 +168,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return os.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0o600) } diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index 2765b92e6e1..d4a94214070 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -65,10 +65,8 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup") cmd.Flags().String("p2p.unconditional_peer_ids", config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") - cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "enable/disable seed mode") cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") diff --git a/cmd/cometbft/commands/show_node_id.go b/cmd/cometbft/commands/show_node_id.go index 07a1937f613..17bc0ed20c4 100644 --- a/cmd/cometbft/commands/show_node_id.go +++ b/cmd/cometbft/commands/show_node_id.go @@ -14,10 +14,9 @@ var ShowNodeIDCmd = &cobra.Command{ Aliases: []string{"show_node_id"}, Short: "Show this node's ID", RunE: showNodeID, - PreRun: deprecateSnakeCase, } -func showNodeID(cmd *cobra.Command, args []string) error { +func showNodeID(*cobra.Command, []string) error { nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) if err != nil { return err diff --git a/cmd/cometbft/commands/show_validator.go b/cmd/cometbft/commands/show_validator.go index 80d970d2015..5dc4af83ec9 100644 --- a/cmd/cometbft/commands/show_validator.go +++ b/cmd/cometbft/commands/show_validator.go @@ -16,10 +16,9 @@ var ShowValidatorCmd = &cobra.Command{ Aliases: []string{"show_validator"}, Short: "Show this node's validator info", RunE: showValidator, - PreRun: deprecateSnakeCase, } -func showValidator(cmd *cobra.Command, args []string) error { +func showValidator(*cobra.Command, []string) error { keyFilePath := config.PrivValidatorKeyFile() if !cmtos.FileExists(keyFilePath) { return fmt.Errorf("private validator file %s does not exist", keyFilePath) diff --git a/cmd/cometbft/commands/testnet.go b/cmd/cometbft/commands/testnet.go index 4fc6501b5a8..6870876d101 100644 --- a/cmd/cometbft/commands/testnet.go +++ b/cmd/cometbft/commands/testnet.go @@ -37,7 +37,7 @@ var ( ) const ( - nodeDirPerm = 0755 + nodeDirPerm = 0o755 ) func init() { @@ -94,7 +94,7 @@ Example: RunE: testnetFiles, } -func testnetFiles(cmd *cobra.Command, args []string) error { +func testnetFiles(*cobra.Command, []string) error { if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { return fmt.Errorf( "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", diff --git a/cmd/cometbft/main.go b/cmd/cometbft/main.go index f24e227ebba..908f2a939d5 100644 --- 
a/cmd/cometbft/main.go +++ b/cmd/cometbft/main.go @@ -16,7 +16,6 @@ func main() { rootCmd.AddCommand( cmd.GenValidatorCmd, cmd.InitFilesCmd, - cmd.ProbeUpnpCmd, cmd.LightCmd, cmd.ReplayCmd, cmd.ReplayConsoleCmd, diff --git a/codecov.yml b/codecov.yml index 57c4bb16036..94448646568 100644 --- a/codecov.yml +++ b/codecov.yml @@ -19,7 +19,6 @@ ignore: - "DOCKER" - "scripts" - "**/*.pb.go" - - "libs/pubsub/query/query.peg.go" - "*.md" - "*.rst" - "*.yml" diff --git a/config/config.go b/config/config.go index 00127a550b9..f78f8088293 100644 --- a/config/config.go +++ b/config/config.go @@ -39,6 +39,9 @@ const ( DefaultNodeKeyName = "node_key.json" DefaultAddrBookName = "addrbook.json" + + MempoolTypeFlood = "flood" + MempoolTypeNop = "nop" ) // NOTE: Most of the structs & relevant comments + the @@ -149,6 +152,9 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.Instrumentation.ValidateBasic(); err != nil { return fmt.Errorf("error in [instrumentation] section: %w", err) } + if !cfg.Consensus.CreateEmptyBlocks && cfg.Mempool.Type == MempoolTypeNop { + return fmt.Errorf("`nop` mempool does not support create_empty_blocks = false") + } return nil } @@ -386,6 +392,10 @@ type RPCConfig struct { // See https://github.com/tendermint/tendermint/issues/3435 TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` + // Maximum number of requests that can be sent in a batch + // https://www.jsonrpc.org/specification#batch + MaxRequestBatchSize int `mapstructure:"max_request_batch_size"` + // Maximum size of request body, in bytes MaxBodyBytes int64 `mapstructure:"max_body_bytes"` @@ -434,8 +444,9 @@ func DefaultRPCConfig() *RPCConfig { TimeoutBroadcastTxCommit: 10 * time.Second, WebSocketWriteBufferSize: defaultSubscriptionBufferSize, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxRequestBatchSize: 10, // maximum requests in a JSON-RPC batch request + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default TLSCertFile: "", TLSKeyFile: "", @@ -481,6 +492,9 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.TimeoutBroadcastTxCommit < 0 { return errors.New("timeout_broadcast_tx_commit can't be negative") } + if cfg.MaxRequestBatchSize < 0 { + return errors.New("max_request_batch_size can't be negative") + } if cfg.MaxBodyBytes < 0 { return errors.New("max_body_bytes can't be negative") } @@ -536,17 +550,9 @@ type P2PConfig struct { //nolint: maligned // We only use these if we can’t connect to peers in the addrbook Seeds string `mapstructure:"seeds"` - // Comma separated list of peers to be added to the peer store - // on startup. Either BootstrapPeers or PersistentPeers are - // needed for peer discovery - BootstrapPeers string `mapstructure:"bootstrap_peers"` - // Comma separated list of nodes to keep persistent connections to PersistentPeers string `mapstructure:"persistent_peers"` - // UPNP port forwarding - UPNP bool `mapstructure:"upnp"` - // Path to address book AddrBook string `mapstructure:"addr_book_file"` @@ -601,7 +607,7 @@ type P2PConfig struct { //nolint: maligned // Testing params. 
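The config hunks above introduce a mempool `type` ("flood" or "nop") together with a cross-section invariant enforced in `Config.ValidateBasic`: a `nop` mempool cannot be combined with `create_empty_blocks = false`, since with no mempool the transactions consensus would wait for can never arrive. A standalone restatement of the two checks (constants mirror the hunk; the helper name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

const (
	mempoolTypeFlood = "flood"
	mempoolTypeNop   = "nop"
)

// validate restates the checks added in the hunk: the mempool type must be
// a known one (empty string tolerated for backwards compatibility), and the
// nop mempool cannot be paired with create_empty_blocks = false, because
// consensus would wait indefinitely for txs that never reach a mempool.
func validate(mempoolType string, createEmptyBlocks bool) error {
	switch mempoolType {
	case mempoolTypeFlood, mempoolTypeNop, "":
	default:
		return fmt.Errorf("unknown mempool type: %q", mempoolType)
	}
	if !createEmptyBlocks && mempoolType == mempoolTypeNop {
		return errors.New("`nop` mempool does not support create_empty_blocks = false")
	}
	return nil
}

func main() {
	fmt.Println(validate("flood", false)) // <nil>
	fmt.Println(validate("nop", true))    // <nil>
	fmt.Println(validate("nop", false))   // error
	fmt.Println(validate("fifo", true))   // error
}
```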
// Force dial to fail TestDialFail bool `mapstructure:"test_dial_fail"` - // FUzz connection + // Fuzz connection TestFuzz bool `mapstructure:"test_fuzz"` TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"` } @@ -611,7 +617,6 @@ func DefaultP2PConfig() *P2PConfig { return &P2PConfig{ ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", - UPNP: false, AddrBook: defaultAddrBookPath, AddrBookStrict: true, MaxNumInboundPeers: 40, @@ -703,6 +708,15 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { // implementation (previously called v0), and a prioritized mempool (v1), which // was removed (see https://github.com/cometbft/cometbft/issues/260). type MempoolConfig struct { + // The type of mempool for this node to use. + // + // Possible types: + // - "flood" : concurrent linked list mempool with flooding gossip protocol + // (default) + // - "nop" : nop-mempool (short for no operation; the ABCI app is + // responsible for storing, disseminating and proposing txs). + // "create_empty_blocks=false" is not supported. + Type string `mapstructure:"type"` // RootDir is the root directory for all data. This should be configured via // the $CMTHOME env variable or --home cmd flag rather than overriding this // struct field. @@ -713,6 +727,16 @@ type MempoolConfig struct { // mempool may become invalid. If this does not apply to your application, // you can disable rechecking. Recheck bool `mapstructure:"recheck"` + // RecheckTimeout is the time the application has during the rechecking process + // to return CheckTx responses, once all requests have been sent. Responses that + // arrive after the timeout expires are discarded. It only applies to + // non-local ABCI clients and when recheck is enabled. + // + // The ideal value will strongly depend on the application. It could roughly be estimated as the + // average size of the mempool multiplied by the average time it takes the application to validate one + // transaction. We consider that the ABCI application runs in the same location as the CometBFT binary + // so that the recheck duration is not affected by network delays when making requests and receiving responses. + RecheckTimeout time.Duration `mapstructure:"recheck_timeout"` // Broadcast (default: true) defines whether the mempool should relay // transactions to other peers. Setting this to false will stop the mempool // from relaying transactions to other peers until they are included in a @@ -743,20 +767,38 @@ type MempoolConfig struct { // Including space needed by encoding (one varint per transaction). // XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 MaxBatchBytes int `mapstructure:"max_batch_bytes"` + // Experimental parameters to limit gossiping txs to up to the specified number of peers. + // We use two independent upper values for persistent and non-persistent peers. + // Unconditional peers are not affected by this feature. + // If we are connected to more than the specified number of persistent peers, only send txs to + // ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those + // persistent peers disconnects, activate another persistent peer. + // Similarly for non-persistent peers, with an upper limit of + // ExperimentalMaxGossipConnectionsToNonPersistentPeers. + // If set to 0, the feature is disabled for the corresponding group of peers, that is, the + // number of active connections to that group of peers is not bounded. 
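The `recheck_timeout` documentation above gives a sizing rule: roughly the average mempool size multiplied by the average per-transaction CheckTx latency, assuming the ABCI app runs next to the node so network delay is negligible. Plugging numbers into that rule makes the shipped default concrete; the per-transaction cost below is an assumed figure for illustration, not a measurement:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Rule of thumb from the config comment:
	// recheck_timeout ~= avg mempool size * avg CheckTx latency.
	avgMempoolTxs := 5000                // the default mempool Size
	perTxCheck := 200 * time.Microsecond // assumed app validation cost
	estimate := time.Duration(avgMempoolTxs) * perTxCheck
	fmt.Println(estimate) // 1s, in line with the 1000ms default RecheckTimeout
}
```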
+ // For non-persistent peers, if enabled, a value of 10 is recommended based on experimental + // performance results using the default P2P configuration. + ExperimentalMaxGossipConnectionsToPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_non_persistent_peers"` } // DefaultMempoolConfig returns a default configuration for the CometBFT mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, - Broadcast: true, - WalPath: "", + Type: MempoolTypeFlood, + Recheck: true, + RecheckTimeout: 1000 * time.Millisecond, + Broadcast: true, + WalPath: "", // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck Size: 5000, MaxTxsBytes: 1024 * 1024 * 1024, // 1GB CacheSize: 10000, MaxTxBytes: 1024 * 1024, // 1MB + ExperimentalMaxGossipConnectionsToNonPersistentPeers: 0, + ExperimentalMaxGossipConnectionsToPersistentPeers: 0, } } @@ -780,6 +822,12 @@ func (cfg *MempoolConfig) WalEnabled() bool { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *MempoolConfig) ValidateBasic() error { + switch cfg.Type { + case MempoolTypeFlood, MempoolTypeNop: + case "": // allow empty string to be backwards compatible + default: + return fmt.Errorf("unknown mempool type: %q", cfg.Type) + } if cfg.Size < 0 { return errors.New("size can't be negative") } @@ -792,6 +840,12 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.MaxTxBytes < 0 { return errors.New("max_tx_bytes can't be negative") } + if cfg.ExperimentalMaxGossipConnectionsToPersistentPeers < 0 { + return errors.New("experimental_max_gossip_connections_to_persistent_peers can't be negative") + } + if cfg.ExperimentalMaxGossipConnectionsToNonPersistentPeers < 0 { + return errors.New("experimental_max_gossip_connections_to_non_persistent_peers can't be negative") + } return nil } diff --git a/config/config_test.go b/config/config_test.go index 8f01bdc6e33..5092bc156fd 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -39,6 +39,11 @@ func TestConfigValidateBasic(t *testing.T) { // tamper with timeout_propose cfg.Consensus.TimeoutPropose = -10 * time.Second assert.Error(t, cfg.ValidateBasic()) + cfg.Consensus.TimeoutPropose = 3 * time.Second + + cfg.Consensus.CreateEmptyBlocks = false + cfg.Mempool.Type = config.MempoolTypeNop + assert.Error(t, cfg.ValidateBasic()) } func TestTLSConfiguration(t *testing.T) { @@ -78,6 +83,7 @@ func TestRPCConfigValidateBasic(t *testing.T) { "TimeoutBroadcastTxCommit", "MaxBodyBytes", "MaxHeaderBytes", + "MaxRequestBatchSize", } for _, fieldName := range fieldsToTest { @@ -123,6 +129,9 @@ func TestMempoolConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) } + + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString("invalid") + assert.Error(t, cfg.ValidateBasic()) } func TestStateSyncConfigValidateBasic(t *testing.T) { diff --git a/config/toml.go b/config/toml.go index 598f40f1967..0df9be29eed 100644 --- a/config/toml.go +++ b/config/toml.go @@ -236,6 +236,11 @@ experimental_close_on_slow_client = {{ .RPC.CloseOnSlowClient }} # See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" +# Maximum number of requests that can be sent in a batch +# If the value is 
set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = {{ .RPC.MaxRequestBatchSize }} + # Maximum size of request body, in bytes max_body_bytes = {{ .RPC.MaxBodyBytes }} @@ -268,27 +273,17 @@ pprof_laddr = "{{ .RPC.PprofListenAddress }}" # Address to listen for incoming connections laddr = "{{ .P2P.ListenAddress }}" -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 external_address = "{{ .P2P.ExternalAddress }}" # Comma separated list of seed nodes to connect to seeds = "{{ .P2P.Seeds }}" -# Comma separated list of peers to be added to the peer store -# on startup. Either BootstrapPeers or PersistentPeers are -# needed for peer discovery -bootstrap_peers = "{{ .P2P.BootstrapPeers }}" - # Comma separated list of nodes to keep persistent connections to persistent_peers = "{{ .P2P.PersistentPeers }}" -# UPNP port forwarding -upnp = {{ .P2P.UPNP }} - # Path to address book addr_book_file = "{{ js .P2P.AddrBook }}" @@ -344,6 +339,16 @@ dial_timeout = "{{ .P2P.DialTimeout }}" ####################################################### [mempool] +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + # Recheck (default: true) defines whether CometBFT should recheck the # validity for all remaining transaction in the mempool after a block. # Since a block affects the application state, some transactions in the @@ -351,6 +356,17 @@ dial_timeout = "{{ .P2P.DialTimeout }}" # you can disable rechecking. recheck = {{ .Mempool.Recheck }} +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +# +# The ideal value will strongly depend on the application. It could roughly be estimated as the +# average size of the mempool multiplied by the average time it takes the application to validate one +# transaction. We consider that the ABCI application runs in the same location as the CometBFT binary +# so that the recheck duration is not affected by network delays when making requests and receiving responses. +recheck_timeout = "{{ .Mempool.RecheckTimeout }}" + # Broadcast (default: true) defines whether the mempool should relay # transactions to other peers. Setting this to false will stop the mempool # from relaying transactions to other peers until they are included in a @@ -389,6 +405,21 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }} # XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 max_batch_bytes = {{ .Mempool.MaxBatchBytes }} +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. 
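The experimental gossip caps described in the template comments spanning this point bound how many peers of each group receive txs: with a cap K > 0, only K peers of the group are active at a time, and when an active peer disconnects another connected peer from the group is activated; a cap of 0 leaves the group unbounded. A toy sketch of that active-set semantics (a simplification under those stated rules, not the mempool's actual bookkeeping):

```go
package main

import "fmt"

// activeSet keeps connected peers of one group in activation order and
// exposes the first cap of them as the gossip targets. cap == 0 disables
// the limit, matching the config's zero-value behavior.
type activeSet struct {
	cap   int
	peers []string
}

func (s *activeSet) active() []string {
	if s.cap == 0 || len(s.peers) <= s.cap {
		return s.peers
	}
	return s.peers[:s.cap]
}

// disconnect removes a peer; a previously inactive peer slides into the
// active window, which models "activate another persistent peer".
func (s *activeSet) disconnect(peer string) {
	for i, p := range s.peers {
		if p == peer {
			s.peers = append(s.peers[:i], s.peers[i+1:]...)
			return
		}
	}
}

func main() {
	s := activeSet{cap: 2, peers: []string{"p1", "p2", "p3"}}
	fmt.Println(s.active()) // [p1 p2]
	s.disconnect("p1")
	fmt.Println(s.active()) // [p2 p3]: p3 activated to keep 2 connections
}
```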
+# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers }} +experimental_max_gossip_connections_to_non_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers }} + ####################################################### ### State Sync Configuration Options ### ####################################################### diff --git a/consensus/README.md b/consensus/README.md index 90a48eaefc7..e74c03ab088 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -1,3 +1,3 @@ # Consensus -See the [consensus spec](https://github.com/cometbft/cometbft/tree/main/spec/consensus) for more information. +See the [consensus spec](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/consensus) for more information. diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 6365e48911a..bb72e7932a9 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -145,9 +145,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // allow first height to happen normally so that byzantine validator is no longer proposer if height == prevoteHeight { bcs.Logger.Info("Sending two votes") - prevote1, err := bcs.signVote(cmtproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header()) + prevote1, err := bcs.signVote(cmtproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header(), nil) require.NoError(t, err) - prevote2, err := bcs.signVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) + prevote2, err := bcs.signVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) require.NoError(t, err) peerList := reactors[byzantineNode].Switch.Peers().List() bcs.Logger.Info("Getting peer list", "peers", peerList) @@ -319,7 +319,6 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { switches[i] = p2p.MakeSwitch( config.P2P, i, - "foo", "1.0.0", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) @@ -337,11 +336,10 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // NOTE: Now, test validators are MockPV, which by default doesn't // do any safety checks. css[i].privValidator.(types.MockPV).DisableChecks() - css[i].decideProposal = func(j int32) func(int64, int32) { - return func(height int64, round int32) { - byzantineDecideProposalFunc(ctx, t, height, round, css[j], switches[j]) - } - }(int32(i)) + j := i + css[i].decideProposal = func(height int64, round int32) { + byzantineDecideProposalFunc(ctx, t, height, round, css[j], switches[j]) + } // We are setting the prevote function to do nothing because the prevoting // and precommitting are done alongside the proposal. 
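The experimental connection caps documented above reduce to a simple rule: gossip transactions to at most N peers per group, with 0 meaning unbounded. A toy sketch of that selection rule, using stand-in types rather than the mempool reactor's actual data structures:

```go
package main

import "fmt"

type peer struct{ id string }

// gossipTargets caps the peers we gossip txs to; limit 0 disables the cap.
// The real reactor also activates a replacement peer when a capped peer
// disconnects; this sketch simply takes the first `limit` peers.
func gossipTargets(peers []peer, limit int) []peer {
	if limit == 0 || len(peers) <= limit {
		return peers
	}
	return peers[:limit]
}

func main() {
	peers := []peer{{"a"}, {"b"}, {"c"}}
	fmt.Println(gossipTargets(peers, 2)) // [{a} {b}]
	fmt.Println(gossipTargets(peers, 0)) // [{a} {b} {c}] -- cap disabled
}
```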
css[i].doPrevote = func(height int64, round int32) {} @@ -542,8 +540,8 @@ func sendProposalAndParts( // votes cs.mtx.Lock() - prevote, _ := cs.signVote(cmtproto.PrevoteType, blockHash, parts.Header()) - precommit, _ := cs.signVote(cmtproto.PrecommitType, blockHash, parts.Header()) + prevote, _ := cs.signVote(cmtproto.PrevoteType, blockHash, parts.Header(), nil) + precommit, _ := cs.signVote(cmtproto.PrecommitType, blockHash, parts.Header(), nil) cs.mtx.Unlock() peer.Send(p2p.Envelope{ ChannelID: VoteChannel, diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index dd5f8f2ceca..d6bcfdfda30 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -48,21 +48,21 @@ func TestReactorInvalidPrecommit(t *testing.T) { // and otherwise disable the priv validator byzVal.mtx.Lock() pv := byzVal.privValidator - byzVal.doPrevote = func(height int64, round int32) { - invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv) + byzVal.doPrevote = func(int64, int32) { + invalidDoPrevoteFunc(t, byzVal, byzR.Switch, pv) } byzVal.mtx.Unlock() // wait for a bunch of blocks // TODO: make this tighter by ensuring the halt happens by block 2 for i := 0; i < 10; i++ { - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch, pv types.PrivValidator) { +func invalidDoPrevoteFunc(t *testing.T, cs *State, sw *p2p.Switch, pv types.PrivValidator) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -89,7 +89,8 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw Type: cmtproto.PrecommitType, BlockID: types.BlockID{ Hash: blockHash, - PartSetHeader: types.PartSetHeader{Total: 1, Hash: cmtrand.Bytes(32)}}, + PartSetHeader: types.PartSetHeader{Total: 1, Hash: cmtrand.Bytes(32)}, + }, } p := precommit.ToProto() err = cs.privValidator.SignVote(cs.state.ChainID, p) diff --git a/consensus/metrics.gen.go b/consensus/metrics.gen.go index a986b6bfeb5..aea9322cde8 100644 --- a/consensus/metrics.gen.go +++ b/consensus/metrics.gen.go @@ -106,6 +106,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_size_bytes", Help: "Size of the block.", }, labels).With(labelsAndValues...), + ChainSizeBytes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chain_size_bytes", + Help: "Size of the chain in bytes.", + }, labels).With(labelsAndValues...), TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -124,6 +130,18 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_parts", Help: "Number of block parts transmitted by each peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + DuplicateBlockPart: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "duplicate_block_part", + Help: "Number of times we received a duplicate block part", + }, labels).With(labelsAndValues...), + DuplicateVote: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "duplicate_vote", + Help: "Number of times we received a duplicate vote", + }, labels).With(labelsAndValues...), StepDurationSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: 
namespace, Subsystem: MetricsSubsystem, @@ -200,9 +218,12 @@ func NopMetrics() *Metrics { BlockIntervalSeconds: discard.NewHistogram(), NumTxs: discard.NewGauge(), BlockSizeBytes: discard.NewGauge(), + ChainSizeBytes: discard.NewCounter(), TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), BlockParts: discard.NewCounter(), + DuplicateBlockPart: discard.NewCounter(), + DuplicateVote: discard.NewCounter(), StepDurationSeconds: discard.NewHistogram(), BlockGossipPartsReceived: discard.NewCounter(), QuorumPrevoteDelay: discard.NewGauge(), diff --git a/consensus/metrics.go b/consensus/metrics.go index ab2b78d476d..6e89d6b483f 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -8,6 +8,7 @@ import ( cstypes "github.com/cometbft/cometbft/consensus/types" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + types "github.com/cometbft/cometbft/types" ) const ( @@ -56,6 +57,8 @@ type Metrics struct { NumTxs metrics.Gauge // Size of the block. BlockSizeBytes metrics.Gauge + // Size of the chain in bytes. + ChainSizeBytes metrics.Counter // Total number of transactions. TotalTxs metrics.Gauge // The latest block height. @@ -64,6 +67,12 @@ type Metrics struct { // Number of block parts transmitted by each peer. BlockParts metrics.Counter `metrics_labels:"peer_id"` + // Number of times we received a duplicate block part + DuplicateBlockPart metrics.Counter + + // Number of times we received a duplicate vote + DuplicateVote metrics.Counter + // Histogram of durations for each step in the consensus protocol. StepDurationSeconds metrics.Histogram `metrics_labels:"step" metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` stepStart time.Time @@ -136,7 +145,7 @@ func (m *Metrics) MarkVoteExtensionReceived(accepted bool) { func (m *Metrics) MarkVoteReceived(vt cmtproto.SignedMsgType, power, totalPower int64) { p := float64(power) / float64(totalPower) - n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + n := types.SignedMsgTypeToShortString(vt) m.RoundVotingPowerPercent.With("vote_type", n).Add(p) } @@ -145,17 +154,15 @@ func (m *Metrics) MarkRound(r int32, st time.Time) { roundTime := time.Since(st).Seconds() m.RoundDurationSeconds.Observe(roundTime) - pvt := cmtproto.PrevoteType - pvn := strings.ToLower(strings.TrimPrefix(pvt.String(), "SIGNED_MSG_TYPE_")) + pvn := types.SignedMsgTypeToShortString(cmtproto.PrevoteType) m.RoundVotingPowerPercent.With("vote_type", pvn).Set(0) - pct := cmtproto.PrecommitType - pcn := strings.ToLower(strings.TrimPrefix(pct.String(), "SIGNED_MSG_TYPE_")) + pcn := types.SignedMsgTypeToShortString(cmtproto.PrecommitType) m.RoundVotingPowerPercent.With("vote_type", pcn).Set(0) } func (m *Metrics) MarkLateVote(vt cmtproto.SignedMsgType) { - n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + n := types.SignedMsgTypeToShortString(vt) m.LateVotes.With("vote_type", n).Add(1) } diff --git a/consensus/reactor.go b/consensus/reactor.go index 1d7655ae26b..ee87b7ba637 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -107,14 +107,19 @@ func (conR *Reactor) OnStop() { func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { conR.Logger.Info("SwitchToConsensus") - // We have no votes, so reconstruct LastCommit from SeenCommit. 
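The metrics changes above replace several copies of an inline strings.TrimPrefix expression with a new types.SignedMsgTypeToShortString helper. Its likely shape can be inferred from the code it replaces; the standalone sketch below uses a local stand-in for the proto enum, and the actual helper in the types package may differ:

```go
package main

import (
	"fmt"
	"strings"
)

// signedMsgType stands in for cmtproto.SignedMsgType, whose String()
// returns values such as "SIGNED_MSG_TYPE_PREVOTE".
type signedMsgType string

func (t signedMsgType) String() string { return string(t) }

// signedMsgTypeToShortString mirrors the inline expression removed from
// consensus/metrics.go.
func signedMsgTypeToShortString(vt signedMsgType) string {
	return strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_"))
}

func main() {
	fmt.Println(signedMsgTypeToShortString("SIGNED_MSG_TYPE_PREVOTE"))   // prevote
	fmt.Println(signedMsgTypeToShortString("SIGNED_MSG_TYPE_PRECOMMIT")) // precommit
}
```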
- if state.LastBlockHeight > 0 { - conR.conS.reconstructLastCommit(state) - } + func() { + // We need to lock, as we are not entering consensus state from State's `handleMsg` or `handleTimeout` + conR.conS.mtx.Lock() + defer conR.conS.mtx.Unlock() + // We have no votes, so reconstruct LastCommit from SeenCommit + if state.LastBlockHeight > 0 { + conR.conS.reconstructLastCommit(state) + } - // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a - // NewRoundStepMessage. - conR.conS.updateToState(state) + // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a + // NewRoundStepMessage. + conR.conS.updateToState(state) + }() conR.mtx.Lock() conR.waitSync = false @@ -205,7 +210,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer is a noop. -func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (conR *Reactor) RemovePeer(p2p.Peer, interface{}) { if !conR.IsRunning() { return } @@ -425,7 +430,6 @@ func (conR *Reactor) subscribeToBroadcastEvents() { }); err != nil { conR.Logger.Error("Error adding listener for events", "err", err) } - } func (conR *Reactor) unsubscribeFromBroadcastEvents() { @@ -640,8 +644,8 @@ OUTER_LOOP: } func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, - prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { - + prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer, +) { if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { // Ensure that the peer's PartSetHeader is correct blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) @@ -695,7 +699,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { logger := conR.Logger.With("peer", peer) // Simple hack to throttle logs upon sleep. - var sleeping = 0 + sleeping := 0 OUTER_LOOP: for { @@ -740,10 +744,20 @@ OUTER_LOOP: // Load the block's extended commit for prs.Height, // which contains precommit signatures for prs.Height. var ec *types.ExtendedCommit - if conR.conS.state.ConsensusParams.ABCI.VoteExtensionsEnabled(prs.Height) { + var veEnabled bool + func() { + conR.conS.mtx.RLock() + defer conR.conS.mtx.RUnlock() + veEnabled = conR.conS.state.ConsensusParams.ABCI.VoteExtensionsEnabled(prs.Height) + }() + if veEnabled { ec = conR.conS.blockStore.LoadBlockExtendedCommit(prs.Height) } else { - ec = conR.conS.blockStore.LoadBlockCommit(prs.Height).WrappedExtendedCommit() + c := conR.conS.blockStore.LoadBlockCommit(prs.Height) + if c == nil { + continue + } + ec = c.WrappedExtendedCommit() } if ec == nil { continue @@ -776,7 +790,6 @@ func (conR *Reactor) gossipVotesForHeight( prs *cstypes.PeerRoundState, ps *PeerState, ) bool { - // If there are lastCommits to send... if prs.Step == cstypes.RoundStepNewHeight { if ps.PickSendVote(rs.LastCommit) { @@ -832,7 +845,6 @@ func (conR *Reactor) gossipVotesForHeight( // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) { - OUTER_LOOP: for { // Manage disconnects from self or peer. 
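Both SwitchToConsensus and gossipVotesRoutine above wrap their critical sections in an immediately invoked function literal, so that defer releases the lock at the end of the block rather than at the end of the enclosing function. A minimal standalone illustration of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.RWMutex
	state := map[string]int64{"height": 1}

	var height int64
	func() {
		mu.RLock()
		defer mu.RUnlock() // released as soon as this closure returns
		height = state["height"]
	}()

	// The lock is no longer held here, so slow follow-up work (disk reads,
	// network sends) does not block writers.
	fmt.Println("snapshot height:", height)
}
```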
@@ -1067,7 +1079,8 @@ func (ps *PeerState) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cmtjson.Marshal(ps) + type jsonPeerState PeerState + return cmtjson.Marshal((*jsonPeerState)(ps)) } // GetHeight returns an atomic snapshot of the PeerRoundState's height @@ -1159,8 +1172,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } - height, round, votesType, size := - votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size() + height, round, votesType, size := votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size() // Lazily set data using 'votes'. if votes.IsCommit() { diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index bd68f4a6b5e..aed400188ac 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -24,6 +24,7 @@ import ( "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/libs/bits" "github.com/cometbft/cometbft/libs/bytes" + "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" mempl "github.com/cometbft/cometbft/mempool" @@ -114,9 +115,9 @@ func TestReactorBasic(t *testing.T) { reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } // Ensure we can process blocks with evidence @@ -209,11 +210,11 @@ func TestReactorWithEvidence(t *testing.T) { // we expect for each validator that is the proposer to propose one piece of evidence. for i := 0; i < nValidators; i++ { - timeoutWaitGroup(t, nValidators, func(j int) { + timeoutWaitGroup(nValidators, func(j int) { msg := <-blocksSubs[j].Out() block := msg.Data().(types.EventDataNewBlock).Block assert.Len(t, block.Evidence.Evidence, 1) - }, css) + }) } } @@ -238,9 +239,9 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { @@ -421,9 +422,9 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) // Get peer peer := reactors[1].Switch.Peers().List()[0] @@ -460,9 +461,9 @@ func TestReactorVotingPowerChange(t *testing.T) { } // wait till everyone makes block 1 - timeoutWaitGroup(t, nVals, func(j int) { + timeoutWaitGroup(nVals, func(j int) { <-blocksSubs[j].Out() - }, css) + }) //--------------------------------------------------------------------------- logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") @@ -544,9 +545,9 @@ func TestReactorValidatorSetChanges(t *testing.T) { } // wait till everyone makes block 1 - timeoutWaitGroup(t, nPeers, func(j int) { + timeoutWaitGroup(nPeers, func(j int) { <-blocksSubs[j].Out() - }, css) + }) t.Run("Testing adding one validator", func(t *testing.T) { newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() @@ -610,7 +611,6 @@ func 
TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) t.Run("Testing adding two validators at once", func(t *testing.T) { - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css) @@ -630,7 +630,6 @@ func TestReactorValidatorSetChanges(t *testing.T) { delete(activeVals, string(newValidatorPubKey3.Address())) waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css) }) - } // Check we can make blocks with skip_timeout_commit=false @@ -647,9 +646,9 @@ func TestReactorWithTimeoutCommit(t *testing.T) { defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N-1, func(j int) { + timeoutWaitGroup(N-1, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } func waitForAndValidateBlock( @@ -660,7 +659,7 @@ func waitForAndValidateBlock( css []*State, txs ...[]byte, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { css[j].Logger.Debug("waitForAndValidateBlock") msg := <-blocksSubs[j].Out() newBlock := msg.Data().(types.EventDataNewBlock).Block @@ -676,7 +675,7 @@ func waitForAndValidateBlock( }, mempl.TxInfo{}) require.NoError(t, err) } - }, css) + }) } func waitForAndValidateBlockWithTx( @@ -687,7 +686,7 @@ func waitForAndValidateBlockWithTx( css []*State, txs ...[]byte, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { ntxs := 0 BLOCK_TX_LOOP: for { @@ -710,7 +709,7 @@ func waitForAndValidateBlockWithTx( break BLOCK_TX_LOOP } } - }, css) + }) } func waitForBlockWithUpdatedValsAndValidateIt( @@ -720,7 +719,7 @@ func waitForBlockWithUpdatedValsAndValidateIt( blocksSubs []types.Subscription, css []*State, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { var newBlock *types.Block LOOP: for { @@ -730,17 +729,16 @@ func waitForBlockWithUpdatedValsAndValidateIt( if newBlock.LastCommit.Size() == len(updatedVals) { css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) break LOOP - } else { - css[j].Logger.Debug( - "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", - "height", newBlock.Height, "last_commit", newBlock.LastCommit.Size(), "updated_vals", len(updatedVals), - ) } + css[j].Logger.Debug( + "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", + "height", newBlock.Height, "last_commit", newBlock.LastCommit.Size(), "updated_vals", len(updatedVals), + ) } err := validateBlock(newBlock, updatedVals) assert.Nil(t, err) - }, css) + }) } // expects high synchrony! 
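The PeerState.MarshalJSON fix above (exercised by the TestMarshalJSONPeerState test added further below) relies on a classic encoding/json trick: a locally defined type keeps the same fields but drops the MarshalJSON method, so the nested Marshal call uses the default struct encoding instead of recursing forever. A self-contained sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type peerState struct {
	Votes int `json:"votes"`
}

func (ps *peerState) MarshalJSON() ([]byte, error) {
	// jsonPeerState has peerState's fields but none of its methods, so the
	// call below does not re-enter this MarshalJSON.
	type jsonPeerState peerState
	return json.Marshal((*jsonPeerState)(ps))
}

func main() {
	out, err := json.Marshal(&peerState{Votes: 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"votes":3}
}
```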
@@ -760,7 +758,7 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { return nil } -func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) { +func timeoutWaitGroup(n int, f func(int)) { wg := new(sync.WaitGroup) wg.Add(n) for i := 0; i < n; i++ { @@ -1099,3 +1097,32 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { }) } } + +func TestMarshalJSONPeerState(t *testing.T) { + ps := NewPeerState(nil) + data, err := json.Marshal(ps) + require.NoError(t, err) + require.JSONEq(t, `{ + "round_state":{ + "height": "0", + "round": -1, + "step": 0, + "start_time": "0001-01-01T00:00:00Z", + "proposal": false, + "proposal_block_part_set_header": + {"total":0, "hash":""}, + "proposal_block_parts": null, + "proposal_pol_round": -1, + "proposal_pol": null, + "prevotes": null, + "precommits": null, + "last_commit_round": -1, + "last_commit": null, + "catchup_commit_round": -1, + "catchup_commit": null + }, + "stats":{ + "votes":"0", + "block_parts":"0"} + }`, string(data)) +} diff --git a/consensus/replay.go b/consensus/replay.go index 6496693950c..b8e457fa518 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -240,9 +240,14 @@ func (h *Handshaker) NBlocks() int { // TODO: retry the handshake/replay if it fails ? func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { + return h.HandshakeWithContext(context.TODO(), proxyApp) +} + +// HandshakeWithContext is cancellable version of Handshake +func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. - res, err := proxyApp.Query().Info(context.TODO(), proxy.RequestInfo) + res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo) if err != nil { return fmt.Errorf("error calling Info: %v", err) } @@ -266,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. - appHash, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } @@ -287,6 +292,17 @@ func (h *Handshaker) ReplayBlocks( appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns, +) ([]byte, error) { + return h.ReplayBlocksWithContext(context.TODO(), state, appHash, appBlockHeight, proxyApp) +} + +// ReplayBlocksWithContext is cancellable version of ReplayBlocks. +func (h *Handshaker) ReplayBlocksWithContext( + ctx context.Context, + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -391,7 +407,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! 
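The Handshake/HandshakeWithContext and ReplayBlocks/ReplayBlocksWithContext pairs above follow a common Go convention: keep the existing method as a thin wrapper over a new context-aware variant, so existing callers compile unchanged. A minimal sketch of the shape, with illustrative names only:

```go
package main

import (
	"context"
	"fmt"
)

type handshaker struct{}

// Handshake keeps the old signature for existing callers.
func (h *handshaker) Handshake() error {
	return h.HandshakeWithContext(context.TODO())
}

// HandshakeWithContext does the real work and can be cancelled.
func (h *handshaker) HandshakeWithContext(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err // caller already cancelled; abort before doing anything
	}
	fmt.Println("handshake completed")
	return nil
}

func main() {
	if err := (&handshaker{}).Handshake(); err != nil {
		fmt.Println("handshake failed:", err)
	}
}
```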
@@ -406,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -443,6 +459,7 @@ func (h *Handshaker) ReplayBlocks( } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, proxyApp proxy.AppConns, appBlockHeight, @@ -469,6 +486,12 @@ func (h *Handshaker) replayBlocks( firstBlock = state.InitialHeight } for i := firstBlock; i <= finalBlock; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go index f6d6a8eb5b8..0c55552f036 100644 --- a/consensus/replay_stubs.go +++ b/consensus/replay_stubs.go @@ -20,22 +20,22 @@ func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } func (emptyMempool) SizeBytes() int64 { return 0 } -func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.ResponseCheckTx), _ mempl.TxInfo) error { +func (emptyMempool) CheckTx(types.Tx, func(*abci.ResponseCheckTx), mempl.TxInfo) error { return nil } -func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error { +func (txmp emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } -func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } func (emptyMempool) Update( - _ int64, - _ types.Txs, - _ []*abci.ExecTxResult, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + int64, + types.Txs, + []*abci.ExecTxResult, + mempl.PreCheckFunc, + mempl.PostCheckFunc, ) error { return nil } @@ -74,6 +74,6 @@ type mockProxyApp struct { finalizeBlockResponse *abci.ResponseFinalizeBlock } -func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (mock *mockProxyApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { return mock.finalizeBlockResponse, nil } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 0d0c9f0dcc1..bcf74387f36 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -23,7 +23,6 @@ import ( abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/abci/types/mocks" cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/crypto" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" @@ -67,8 +66,11 @@ func TestMain(m *testing.M) { // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. 
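The replayBlocks change above checks the context once per replayed block, so a long replay can be aborted without waiting for it to finish. The non-blocking select it uses is a standard idiom; a self-contained sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func replay(ctx context.Context, first, final int64) error {
	for i := first; i <= final; i++ {
		// Non-blocking poll: bail out if cancelled, otherwise keep going.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		fmt.Println("applying block", i)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // let the deadline pass
	fmt.Println(replay(ctx, 1, 10))  // context deadline exceeded
}
```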
-func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store, +func startNewStateAndWaitForBlock( + t *testing.T, + consensusReplayConfig *cfg.Config, + blockDB dbm.DB, + stateStore sm.Store, ) { logger := log.TestingLogger() state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) @@ -215,7 +217,7 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(t, consensusReplayConfig, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -648,8 +650,6 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin walFile := tempWALWithData(walBody) testConfig.Consensus.SetWalFile(walFile) - privVal := privval.LoadFilePV(testConfig.PrivValidatorKeyFile(), testConfig.PrivValidatorStateFile()) - wal, err := NewWAL(walFile) require.NoError(t, err) wal.SetLogger(log.TestingLogger()) @@ -662,9 +662,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin }) chain, extCommits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(t, testConfig, pubKey, kvstore.AppVersion) + stateDB, genesisState, store = stateAndStore(t, testConfig, kvstore.AppVersion) } stateStore := sm.NewStore(stateDB, sm.StoreOptions{ @@ -925,9 +923,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, state, store := stateAndStore(t, config, pubKey, appVersion) + stateDB, state, store := stateAndStore(t, config, appVersion) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -996,7 +992,7 @@ type badApp struct { onlyLastHashIsWrong bool } -func (app *badApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (app *badApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.height++ if app.onlyLastHashIsWrong { if app.height == app.numBlocks { @@ -1145,7 +1141,6 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { func stateAndStore( t *testing.T, config *cfg.Config, - pubKey crypto.PubKey, appVersion uint64, ) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() @@ -1189,10 +1184,10 @@ func (bs *mockBlockStore) Base() int64 { return bs.base func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (bs *mockBlockStore) LoadBlockByHash([]byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] } -func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } +func (bs *mockBlockStore) LoadBlockMetaByHash([]byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { 
block := bs.chain[height-1] bps, err := block.MakePartSet(types.BlockPartSizeBytes) @@ -1202,10 +1197,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { Header: block.Header, } } -func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (bs *mockBlockStore) LoadBlockPart(int64, int) *types.Part { return nil } +func (bs *mockBlockStore) SaveBlockWithExtendedCommit(*types.Block, *types.PartSet, *types.ExtendedCommit) { } -func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + +func (bs *mockBlockStore) SaveBlock(*types.Block, *types.PartSet, *types.Commit) { } func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { @@ -1215,11 +1211,12 @@ func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.extCommits[height-1].ToCommit() } + func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { return bs.extCommits[height-1] } -func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) { +func (bs *mockBlockStore) PruneBlocks(height int64, _ sm.State) (uint64, int64, error) { evidencePoint := height pruned := uint64(0) for i := int64(0); i < height-1; i++ { @@ -1251,10 +1248,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { config := ResetConfig("handshake_test_") defer os.RemoveAll(config.RootDir) - privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, state, store := stateAndStore(t, config, pubKey, 0x0) + stateDB, state, store := stateAndStore(t, config, 0x0) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -1276,6 +1270,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { if err := handshaker.Handshake(proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } + var err error // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) diff --git a/consensus/state.go b/consensus/state.go index 47a4c733fcb..d47b43f2413 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -37,6 +37,7 @@ var ( ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") ErrAddingVote = errors.New("error adding vote") ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") + ErrProposalTooManyParts = errors.New("proposal block has too many parts") errPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors") ) @@ -141,6 +142,9 @@ type State struct { // for reporting metrics metrics *Metrics + + // offline state sync height indicating to which height the node synced offline + offlineStateSyncHeight int64 } // StateOption sets an optional parameter on the State. @@ -172,7 +176,9 @@ func NewState( evsw: cmtevents.NewEventSwitch(), metrics: NopMetrics(), } - + for _, option := range options { + option(cs) + } // set function defaults (may be overwritten before calling Start) cs.decideProposal = cs.defaultDecideProposal cs.doPrevote = cs.defaultDoPrevote @@ -180,7 +186,16 @@ func NewState( // We have no votes, so reconstruct LastCommit from SeenCommit. 
if state.LastBlockHeight > 0 { - cs.reconstructLastCommit(state) + // In case of out of band performed statesync, the state store + // will have a state but no extended commit (as no block has been downloaded). + // If the height at which the vote extensions are enabled is lower + // than the height at which we statesync, consensus will panic because + // it will try to reconstruct the extended commit here. + if cs.offlineStateSyncHeight != 0 { + cs.reconstructSeenCommit(state) + } else { + cs.reconstructLastCommit(state) + } } cs.updateToState(state) @@ -188,9 +203,6 @@ func NewState( // NOTE: we do not call scheduleRound0 yet, we do that upon Start() cs.BaseService = *service.NewBaseService(nil, "State", cs) - for _, option := range options { - option(cs) - } return cs } @@ -212,6 +224,12 @@ func StateMetrics(metrics *Metrics) StateOption { return func(cs *State) { cs.metrics = metrics } } +// OfflineStateSyncHeight indicates the height at which the node +// statesync offline - before booting sets the metrics. +func OfflineStateSyncHeight(height int64) StateOption { + return func(cs *State) { cs.offlineStateSyncHeight = height } +} + // String returns a string. func (cs *State) String() string { // better not to access shared variables @@ -469,7 +487,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // SetProposal inputs a proposal. func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} } else { @@ -482,7 +499,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { // AddProposalBlockPart inputs a part of the proposal block. func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} } else { @@ -496,11 +512,11 @@ func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Par // SetProposalAndBlock inputs the proposal and all block parts. func (cs *State) SetProposalAndBlock( proposal *types.Proposal, - block *types.Block, + block *types.Block, //nolint:revive parts *types.PartSet, peerID p2p.ID, ) error { - + // TODO: Since the block parameter is not used, we should instead expose just a SetProposal method. if err := cs.SetProposal(proposal, peerID); err != nil { return err } @@ -562,6 +578,18 @@ func (cs *State) sendInternalMessage(mi msgInfo) { } } +// ReconstructSeenCommit reconstructs the seen commit +// This function is meant to be called after statesync +// that was performed offline as to avoid interfering with vote +// extensions. +func (cs *State) reconstructSeenCommit(state sm.State) { + votes, err := cs.votesFromSeenCommit(state) + if err != nil { + panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) + } + cs.LastCommit = votes +} + // Reconstruct the LastCommit from either SeenCommit or the ExtendedCommit. SeenCommit // and ExtendedCommit are saved along with the block. 
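NewState above now applies its functional options before deciding how to reconstruct the last commit, which is what lets the new OfflineStateSyncHeight option influence that decision. A minimal sketch of the option pattern involved, with simplified stand-in types:

```go
package main

import "fmt"

type state struct {
	offlineStateSyncHeight int64
}

type stateOption func(*state)

// offlineStateSyncHeight records the height reached by an out-of-band
// (offline) state sync, mirroring the new OfflineStateSyncHeight option.
func offlineStateSyncHeight(h int64) stateOption {
	return func(s *state) { s.offlineStateSyncHeight = h }
}

func newState(options ...stateOption) *state {
	s := &state{}
	for _, opt := range options {
		opt(s) // apply options before any logic that depends on them
	}
	if s.offlineStateSyncHeight != 0 {
		fmt.Println("reconstructing seen commit (offline state sync)")
	} else {
		fmt.Println("reconstructing last commit")
	}
	return s
}

func main() {
	newState(offlineStateSyncHeight(100))
	newState()
}
```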
If VoteExtensions are required // the method will panic on an absent ExtendedCommit or an ExtendedCommit without @@ -569,14 +597,9 @@ func (cs *State) sendInternalMessage(mi msgInfo) { func (cs *State) reconstructLastCommit(state sm.State) { extensionsEnabled := state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) if !extensionsEnabled { - votes, err := cs.votesFromSeenCommit(state) - if err != nil { - panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) - } - cs.LastCommit = votes + cs.reconstructSeenCommit(state) return } - votes, err := cs.votesFromExtendedCommit(state) if err != nil { panic(fmt.Sprintf("failed to reconstruct last extended commit; %s", err)) @@ -987,7 +1010,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { default: panic(fmt.Sprintf("invalid timeout step: %v", ti.Step)) } - } func (cs *State) handleTxsAvailable() { @@ -1042,7 +1064,7 @@ func (cs *State) enterNewRound(height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + prevHeight, prevRound, prevStep := cs.Height, cs.Round, cs.Step // increment validators if necessary validators := cs.Validators @@ -1056,17 +1078,21 @@ func (cs *State) enterNewRound(height int64, round int32) { // but we fire an event, so update the round step first cs.updateRoundStep(round, cstypes.RoundStepNewRound) cs.Validators = validators - if round == 0 { - // We've already reset these upon new height, - // and meanwhile we might have received a proposal - // for round 0. - } else { - logger.Debug("resetting proposal info") + // If round == 0, we've already reset these upon new height, and meanwhile + // we might have received a proposal for round 0. + propAddress := validators.GetProposer().PubKey.Address() + if round != 0 { + logger.Info("resetting proposal info", "proposer", propAddress) cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil } + logger.Debug("entering new round", + "previous", log.NewLazySprintf("%v/%v/%v", prevHeight, prevRound, prevStep), + "proposer", propAddress, + ) + cs.Votes.SetRound(cmtmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false @@ -1240,7 +1266,6 @@ func (cs *State) isProposalComplete() bool { } // if this is false the proposer is lying or we haven't received the POL yet return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() - } // Create the next block to propose and return it. Returns nil block upon error. @@ -1322,14 +1347,14 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { // If a block is locked, prevote that. if cs.LockedBlock != nil { logger.Debug("prevote step; already locked on a block; prevoting locked block") - cs.signAddVote(cmtproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + cs.signAddVote(cmtproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header(), nil) return } // If ProposalBlock is nil, prevote nil. if cs.ProposalBlock == nil { logger.Debug("prevote step: ProposalBlock is nil") - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1339,7 +1364,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { // ProposalBlock is invalid, prevote nil. 
logger.Error("prevote step: consensus deems this block invalid; prevoting nil", "err", err) - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1365,7 +1390,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { if !isAppValid { logger.Error("prevote step: state machine rejected a proposed block; this should not happen:"+ "the proposer may be misbehaving; prevoting nil", "err", err) - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1373,7 +1398,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { // NOTE: the proposal signature is validated when it is received, // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) logger.Debug("prevote step: ProposalBlock is valid") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) } // Enter: any +2/3 prevotes at next round. @@ -1443,7 +1468,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") } - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) return } @@ -1473,7 +1498,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { } } - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) return } @@ -1488,7 +1513,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Error("failed publishing event relock", "err", err) } - cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.LockedBlock) return } @@ -1509,7 +1534,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Error("failed publishing event lock", "err", err) } - cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.ProposalBlock) return } @@ -1531,7 +1556,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Error("failed publishing event unlock", "err", err) } - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) } // Enter: any +2/3 precommits for next round. @@ -1742,8 +1767,9 @@ func (cs *State) finalizeCommit(height int64) { stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. - // NOTE The block.AppHash wont reflect these txs until the next block. - stateCopy, err := cs.blockExec.ApplyBlock( + // We use apply verified block here because we have verified the block in this function already. + // NOTE The block.AppHash won't reflect these txs until the next block. 
+ stateCopy, err := cs.blockExec.ApplyVerifiedBlock( stateCopy, types.BlockID{ Hash: block.Hash(), @@ -1865,6 +1891,7 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) cs.metrics.BlockSizeBytes.Set(float64(block.Size())) + cs.metrics.ChainSizeBytes.Add(float64(block.Size())) cs.metrics.CommittedHeight.Set(float64(block.Height)) } @@ -1890,12 +1917,22 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { p := proposal.ToProto() // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifySignature( + pubKey := cs.Validators.GetProposer().PubKey + if !pubKey.VerifySignature( types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, ) { return ErrInvalidProposalSignature } + // Validate the proposed block size, derived from its PartSetHeader + maxBytes := cs.state.ConsensusParams.Block.MaxBytes + if maxBytes == -1 { + maxBytes = int64(types.MaxBlockSizeBytes) + } + if int64(proposal.BlockID.PartSetHeader.Total) > (maxBytes-1)/int64(types.BlockPartSizeBytes)+1 { + return ErrProposalTooManyParts + } + proposal.Signature = p.Signature cs.Proposal = proposal // We don't update cs.ProposalBlockParts if it is already set. @@ -1905,7 +1942,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.Logger.Info("received proposal", "proposal", proposal, "proposer", pubKey.Address()) return nil } @@ -1946,10 +1983,19 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add } cs.metrics.BlockGossipPartsReceived.With("matches_current", "true").Add(1) + if !added { + // NOTE: we are disregarding possible duplicates above where heights dont match or we're not expecting block parts yet + // but between the matches_current = true and false, we have all the info. + cs.metrics.DuplicateBlockPart.Add(1) + } - if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes { + maxBytes := cs.state.ConsensusParams.Block.MaxBytes + if maxBytes == -1 { + maxBytes = int64(types.MaxBlockSizeBytes) + } + if cs.ProposalBlockParts.ByteSize() > maxBytes { return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)", - cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes, + cs.ProposalBlockParts.ByteSize(), maxBytes, ) } if added && cs.ProposalBlockParts.IsComplete() { @@ -1958,7 +2004,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add return added, err } - var pbb = new(cmtproto.Block) + pbb := new(cmtproto.Block) err = proto.Unmarshal(bz, pbb) if err != nil { return added, err @@ -2019,7 +2065,7 @@ func (cs *State) handleCompleteProposal(blockHeight int64) { // Attempt to add the vote. if its a duplicate signature, dupeout the validator func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { added, err := cs.addVote(vote, peerID) - + // NOTE: some of these errors are swallowed here if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. 
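The new proposal check above caps PartSetHeader.Total at the number of parts a block of MaxBytes can occupy (after first mapping the -1 sentinel to types.MaxBlockSizeBytes), using the ceiling division (maxBytes-1)/partSize + 1. A worked example, assuming the conventional 64 KiB value for types.BlockPartSizeBytes:

```go
package main

import "fmt"

func main() {
	const partSize = int64(65536) // assumed value of types.BlockPartSizeBytes (64 KiB)

	for _, maxBytes := range []int64{65536, 65537, 4194304} {
		maxParts := (maxBytes-1)/partSize + 1 // ceiling of maxBytes/partSize
		fmt.Printf("maxBytes=%-8d -> at most %d parts\n", maxBytes, maxParts)
	}
	// maxBytes=65536    -> at most 1 parts
	// maxBytes=65537    -> at most 2 parts
	// maxBytes=4194304  -> at most 64 parts
}
```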
@@ -2089,12 +2135,16 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error if cs.Step != cstypes.RoundStepNewHeight { // Late precommit at prior height is ignored cs.Logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) - return + return added, err } added, err = cs.LastCommit.AddVote(vote) if !added { - return + // If the vote wasnt added but there's no error, its a duplicate vote + if err == nil { + cs.metrics.DuplicateVote.Add(1) + } + return added, err } cs.Logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) @@ -2111,14 +2161,14 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error cs.enterNewRound(cs.Height, 0) } - return + return added, err } // Height mismatch is ignored. // Not necessarily a bad peer, but not favorable behavior. if vote.Height != cs.Height { cs.Logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) - return + return added, err } // Check to see if the chain is configured to extend votes. @@ -2168,7 +2218,12 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error added, err = cs.Votes.AddVote(vote, peerID, extEnabled) if !added { // Either duplicate, or error upon cs.Votes.AddByIndex() - return + + // If the vote wasnt added but there's no error, its a duplicate vote + if err == nil { + cs.metrics.DuplicateVote.Add(1) + } + return added, err } if vote.Round == cs.Round { vals := cs.state.Validators @@ -2301,6 +2356,7 @@ func (cs *State) signVote( msgType cmtproto.SignedMsgType, hash []byte, header types.PartSetHeader, + block *types.Block, ) (*types.Vote, error) { // Flush the WAL. Otherwise, we may not recompute the same vote to sign, // and the privValidator will refuse to sign anything. @@ -2330,7 +2386,7 @@ func (cs *State) signVote( // if the signedMessage type is for a non-nil precommit, add // VoteExtension if extEnabled { - ext, err := cs.blockExec.ExtendVote(context.TODO(), vote) + ext, err := cs.blockExec.ExtendVote(context.TODO(), vote, block, cs.state) if err != nil { return nil, err } @@ -2340,7 +2396,7 @@ func (cs *State) signVote( recoverable, err := types.SignAndCheckVote(vote, cs.privValidator, cs.state.ChainID, extEnabled && (msgType == cmtproto.PrecommitType)) if err != nil && !recoverable { - panic(fmt.Sprintf("non-recoverable error when signing vote (%d/%d)", vote.Height, vote.Round)) + panic(fmt.Sprintf("non-recoverable error when signing vote %v: %v", vote, err)) } return vote, err @@ -2352,10 +2408,10 @@ func (cs *State) voteTime() time.Time { // Minimum time increment between blocks const timeIota = time.Millisecond // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, - // even if cs.LockedBlock != nil. See https://github.com/cometbft/cometbft/tree/main/spec/. + // even if cs.LockedBlock != nil. See https://github.com/cometbft/cometbft/tree/v0.38.x/spec/. 
if cs.LockedBlock != nil { // See the BFT time spec - // https://github.com/cometbft/cometbft/blob/main/spec/consensus/bft-time.md + // https://github.com/cometbft/cometbft/blob/v0.38.x/spec/consensus/bft-time.md minVoteTime = cs.LockedBlock.Time.Add(timeIota) } else if cs.ProposalBlock != nil { minVoteTime = cs.ProposalBlock.Time.Add(timeIota) @@ -2368,10 +2424,12 @@ func (cs *State) voteTime() time.Time { } // sign the vote and publish on internalMsgQueue +// block information is only used to extend votes (precommit only); should be nil in all other cases func (cs *State) signAddVote( msgType cmtproto.SignedMsgType, hash []byte, header types.PartSetHeader, + block *types.Block, ) { if cs.privValidator == nil { // the node does not have a key return @@ -2389,7 +2447,7 @@ func (cs *State) signAddVote( } // TODO: pass pubKey to signVote - vote, err := cs.signVote(msgType, hash, header) + vote, err := cs.signVote(msgType, hash, header, block) if err != nil { cs.Logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) return diff --git a/consensus/state_test.go b/consensus/state_test.go index a2e64972fff..66169bb3b3d 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strings" "testing" "time" @@ -134,7 +135,6 @@ func TestStateProposerSelection2(t *testing.T) { ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } - } // a non-validator should timeout into the prevote round @@ -257,73 +257,100 @@ func TestStateBadProposal(t *testing.T) { } func TestStateOversizedBlock(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - cs1, vss := randState(2) - cs1.state.ConsensusParams.Block.MaxBytes = 2000 - height, round := cs1.Height, cs1.Round - vs2 := vss[1] + const maxBytes = int64(types.BlockPartSizeBytes) - partSize := types.BlockPartSizeBytes + for _, testCase := range []struct { + name string + oversized bool + }{ + { + name: "max size, correct block", + oversized: false, + }, + { + name: "off-by-1 max size, incorrect block", + oversized: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + cs1, vss := randState(2) + cs1.state.ConsensusParams.Block.MaxBytes = maxBytes + height, round := cs1.Height, cs1.Round + vs2 := vss[1] - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + partSize := types.BlockPartSizeBytes - propBlock, err := cs1.createProposalBlock(ctx) - require.NoError(t, err) - propBlock.Data.Txs = []types.Tx{cmtrand.Bytes(2001)} - propBlock.Header.DataHash = propBlock.Data.Hash() + propBlock, propBlockParts := findBlockSizeLimit(t, height, maxBytes, cs1, partSize, testCase.oversized) - // make the second validator the proposer by incrementing round - round++ - incrementRound(vss[1:]...) 
+ timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - propBlockParts, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + // make the second validator the proposer by incrementing round + round++ + incrementRound(vss[1:]...) - totalBytes := 0 - for i := 0; i < int(propBlockParts.Total()); i++ { - part := propBlockParts.GetPart(i) - totalBytes += len(part.Bytes) - } + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := types.NewProposal(height, round, -1, blockID) + p := proposal.ToProto() + if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + proposal.Signature = p.Signature - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + totalBytes := 0 + for i := 0; i < int(propBlockParts.Total()); i++ { + part := propBlockParts.GetPart(i) + totalBytes += len(part.Bytes) + } - // start the machine - startTestRound(cs1, height, round) + maxBlockParts := maxBytes / int64(types.BlockPartSizeBytes) + if maxBytes > maxBlockParts*int64(types.BlockPartSizeBytes) { + maxBlockParts++ + } + numBlockParts := int64(propBlockParts.Total()) - t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } - // c1 should log an error with the block part message as it exceeds the consensus params. The - // block is not added to cs.ProposalBlock so the node timeouts. - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + // start the machine + startTestRound(cs1, height, round) + + t.Log("Block Sizes;", "Limit", maxBytes, "Current", totalBytes) + t.Log("Proposal Parts;", "Maximum", maxBlockParts, "Current", numBlockParts) + + validateHash := propBlock.Hash() + lockedRound := int32(1) + if testCase.oversized { + validateHash = nil + lockedRound = -1 + // If the block is oversized, cs1 should log an error with the block part message as it exceeds + // the consensus params. The block is not added to cs.ProposalBlock so the node times out.
+ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + // and then should send nil prevote and precommit regardless of whether other validators prevote and + // precommit on it + } + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], validateHash) - // and then should send nil prevote and precommit regardless of whether other validators prevote and - // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + // Should not accept a Proposal with too many block parts + if numBlockParts > maxBlockParts { + require.Nil(t, cs1.Proposal) + } - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) + bps, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), false, vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), false, vs2) + ensurePrevote(voteCh, height, round) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, lockedRound, vss[0], validateHash, validateHash) - bps2, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), true, vs2) + bps2, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), true, vs2) + }) + } } //---------------------------------------------------------------------------------------------------- @@ -695,7 +722,7 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + // XXX: this isn't guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1017,7 +1044,7 @@ func TestStateLockPOLSafety1(t *testing.T) { round++ // moving to the next round ensureNewRound(newRoundCh, height, round) - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + // XXX: this isn't guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1180,7 +1207,6 @@ func TestStateLockPOLSafety2(t *testing.T) { ensureNoNewUnlock(unlockCh) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash1) - } // 4 vals.
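TestStateOversizedBlock above was reworked into the table-driven subtest idiom: a slice of named cases driving a single t.Run body. A skeletal, self-contained example of the shape (the case bodies are placeholders, not the real assertions):

```go
package consensus_test

import "testing"

func TestOversizedBlockShape(t *testing.T) {
	for _, tc := range []struct {
		name      string
		oversized bool
	}{
		{name: "max size, correct block", oversized: false},
		{name: "off-by-1 max size, incorrect block", oversized: true},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if tc.oversized {
				t.Log("expect a nil prevote and a rejected proposal")
			} else {
				t.Log("expect a prevote on the proposed block")
			}
		})
	}
}
```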
@@ -1498,7 +1524,7 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) - m.AssertNotCalled(t, "ExtendVote", mock.Anything) + m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) rs := cs1.GetRoundState() @@ -1513,8 +1539,14 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { if testCase.enabled { m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.RequestExtendVote{ - Height: height, - Hash: blockID.Hash, + Height: height, + Hash: blockID.Hash, + Time: rs.ProposalBlock.Time, + Txs: rs.ProposalBlock.Txs.ToSliceOfBytes(), + ProposedLastCommit: abci.CommitInfo{}, + Misbehavior: rs.ProposalBlock.Evidence.Evidence.ToABCI(), + NextValidatorsHash: rs.ProposalBlock.NextValidatorsHash, + ProposerAddress: rs.ProposalBlock.ProposerAddress, }) } else { m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) @@ -1543,7 +1575,6 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { } }) } - } // TestVerifyVoteExtensionNotCalledOnAbsentPrecommit tests that the VerifyVoteExtension @@ -1586,8 +1617,14 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { ensurePrecommit(voteCh, height, round) m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.RequestExtendVote{ - Height: height, - Hash: blockID.Hash, + Height: height, + Hash: blockID.Hash, + Time: rs.ProposalBlock.Time, + Txs: rs.ProposalBlock.Txs.ToSliceOfBytes(), + ProposedLastCommit: abci.CommitInfo{}, + Misbehavior: rs.ProposalBlock.Evidence.Evidence.ToABCI(), + NextValidatorsHash: rs.ProposalBlock.NextValidatorsHash, + ProposerAddress: rs.ProposalBlock.ProposerAddress, }) signAddVotes(cs1, cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, true, vss[2:]...) @@ -1606,7 +1643,6 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Height: height, VoteExtension: []byte("extension"), }) - } // TestPrepareProposalReceivesVoteExtensions tests that the PrepareProposal method @@ -1696,7 +1732,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { require.NotZero(t, len(vote.ExtensionSignature)) cve := cmtproto.CanonicalVoteExtension{ Extension: vote.VoteExtension, - Height: height - 1, //the vote extension was signed in the previous height + Height: height - 1, // the vote extension was signed in the previous height Round: int64(rpp.LocalLastCommit.Round), ChainId: test.DefaultTestChainID, } @@ -1905,7 +1941,7 @@ func TestVoteExtensionEnableHeight(t *testing.T) { // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round -func TestWaitingTimeoutOnNilPolka(t *testing.T) { +func TestWaitingTimeoutOnNilPolka(*testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -2057,7 +2093,6 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { assert.True(t, rs.Step == cstypes.RoundStepCommit) assert.True(t, rs.ProposalBlock == nil) assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - } // What we want: @@ -2435,7 +2470,6 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { t.Errorf("should not output stats message after receiving the known block part!") case <-time.After(50 * time.Millisecond): } - } func TestStateOutputVoteStats(t *testing.T) { @@ -2468,7 +2502,6 @@ func TestStateOutputVoteStats(t *testing.T) { t.Errorf("should not output stats message after receiving the known vote or vote from bigger height") case 
<-time.After(50 * time.Millisecond): } - } func TestSignSameVoteTwice(t *testing.T) { @@ -2517,8 +2550,39 @@ func signAddPrecommitWithExtension( hash []byte, header types.PartSetHeader, extension []byte, - stub *validatorStub) { + stub *validatorStub, +) { v, err := stub.signVote(cmtproto.PrecommitType, hash, header, extension, true) require.NoError(t, err, "failed to sign vote") addVotes(cs, v) } + +func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSize uint32, oversized bool) (*types.Block, *types.PartSet) { + var offset int64 + if !oversized { + offset = -2 + } + softMaxDataBytes := int(types.MaxDataBytes(maxBytes, 0, 0)) + for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { + propBlock := cs.state.MakeBlock( + height, + []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}, + &types.Commit{}, + nil, + cs.privValidatorPubKey.Address(), + ) + + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + if propBlockParts.ByteSize() > maxBytes+offset { + s := "real max" + if oversized { + s = "off-by-1" + } + t.Log("Detected "+s+" data size for block;", "size", i, "softMaxDataBytes", softMaxDataBytes) + return propBlock, propBlockParts + } + } + require.Fail(t, "We shouldn't hit the end of the loop") + return nil, nil +} diff --git a/consensus/ticker.go b/consensus/ticker.go index ae5fab794ab..e6fa61d6b7d 100644 --- a/consensus/ticker.go +++ b/consensus/ticker.go @@ -31,17 +31,21 @@ type TimeoutTicker interface { type timeoutTicker struct { service.BaseService - timer *time.Timer - tickChan chan timeoutInfo // for scheduling timeouts - tockChan chan timeoutInfo // for notifying about them + timerActive bool + timer *time.Timer + tickChan chan timeoutInfo // for scheduling timeouts + tockChan chan timeoutInfo // for notifying about them } // NewTimeoutTicker returns a new TimeoutTicker. func NewTimeoutTicker() TimeoutTicker { tt := &timeoutTicker{ - timer: time.NewTimer(0), - tickChan: make(chan timeoutInfo, tickTockBufferSize), - tockChan: make(chan timeoutInfo, tickTockBufferSize), + timer: time.NewTimer(0), + // An indicator variable to check if the timer is active or not. + // Concurrency safe because the timer is only accessed by a single goroutine. + timerActive: true, + tickChan: make(chan timeoutInfo, tickTockBufferSize), + tockChan: make(chan timeoutInfo, tickTockBufferSize), } tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout @@ -59,7 +63,6 @@ func (t *timeoutTicker) OnStart() error { // OnStop implements service.Service. It stops the timeout routine. func (t *timeoutTicker) OnStop() { t.BaseService.OnStop() - t.stopTimer() } // Chan returns a channel on which timeouts are sent. @@ -76,21 +79,23 @@ func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) { //------------------------------------------------------------- -// stop the timer and drain if necessary +// if the timer is active, stop it and drain the channel. func (t *timeoutTicker) stopTimer() { + if !t.timerActive { + return + } // Stop() returns false if it was already fired or was stopped if !t.timer.Stop() { - select { - case <-t.timer.C: - default: - t.Logger.Debug("Timer already stopped") - } + <-t.timer.C } + t.timerActive = false } // send on tickChan to start a new timer. 
-// timers are interupted and replaced by new ticks from later steps -// timeouts of 0 on the tickChan will be immediately relayed to the tockChan +// timers are interrupted and replaced by new ticks from later steps +// timeouts of 0 on the tickChan will be immediately relayed to the tockChan. +// NOTE: timerActive is not concurrency safe, but it's only accessed in NewTimer and timeoutRoutine, +// making it single-threaded access. func (t *timeoutTicker) timeoutRoutine() { t.Logger.Debug("Starting timeout routine") var ti timeoutInfo @@ -112,15 +117,18 @@ func (t *timeoutTicker) timeoutRoutine() { } } - // stop the last timer + // stop the last timer if it exists t.stopTimer() - // update timeoutInfo and reset timer + // update timeoutInfo, reset timer, and mark timer as active // NOTE time.Timer allows duration to be non-positive ti = newti t.timer.Reset(ti.Duration) + t.timerActive = true + t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) case <-t.timer.C: + t.timerActive = false t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // go routine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine. @@ -128,6 +136,7 @@ func (t *timeoutTicker) timeoutRoutine() { // and managing the timeouts ourselves with a millisecond ticker go func(toi timeoutInfo) { t.tockChan <- toi }(ti) case <-t.Quit(): + t.stopTimer() return } } diff --git a/consensus/ticker_test.go b/consensus/ticker_test.go new file mode 100644 index 00000000000..121e390185b --- /dev/null +++ b/consensus/ticker_test.go @@ -0,0 +1,40 @@ +package consensus + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/consensus/types" +) + +func TestTimeoutTicker(t *testing.T) { + ticker := NewTimeoutTicker() + err := ticker.Start() + require.NoError(t, err) + defer func() { + err := ticker.Stop() + require.NoError(t, err) + }() + + c := ticker.Chan() + for i := 1; i <= 10; i++ { + height := int64(i) + + startTime := time.Now() + // Schedule a timeout for 5ms from now + negTimeout := timeoutInfo{Duration: -1 * time.Millisecond, Height: height, Round: 0, Step: types.RoundStepNewHeight} + timeout := timeoutInfo{Duration: 5 * time.Millisecond, Height: height, Round: 0, Step: types.RoundStepNewRound} + ticker.ScheduleTimeout(negTimeout) + ticker.ScheduleTimeout(timeout) + + // Wait for the timeout to be received + to := <-c + endTime := time.Now() + elapsedTime := endTime.Sub(startTime) + if timeout == to { + require.True(t, elapsedTime >= timeout.Duration, "We got the 5ms timeout. However the timeout happened too quickly. Should be >= 5ms. 
Got %dms (start time %d end time %d)", elapsedTime.Milliseconds(), startTime.UnixMilli(), endTime.UnixMilli()) + } + } +} diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 5cac9e5d827..9827a6a48c1 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -28,19 +28,19 @@ func TestPeerCatchupRounds(t *testing.T) { hvs := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) + vote999_0 := makeVoteHR(1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(1, 0, 1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals) + vote1001_0 := makeVoteHR(1, 0, 1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1", true) if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -54,26 +54,25 @@ func TestPeerCatchupRounds(t *testing.T) { t.Error("Expected to successfully add vote from another peer") } } + func TestInconsistentExtensionData(t *testing.T) { valSet, privVals := types.RandValidatorSet(10, 1) hvsE := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteNoExt := makeVoteHR(t, 1, 0, 20, privVals) + voteNoExt := makeVoteHR(1, 0, 20, privVals) voteNoExt.Extension, voteNoExt.ExtensionSignature = nil, nil require.Panics(t, func() { _, _ = hvsE.AddVote(voteNoExt, "peer1", false) }) hvsNoE := NewHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteExt := makeVoteHR(t, 1, 0, 20, privVals) + voteExt := makeVoteHR(1, 0, 20, privVals) require.Panics(t, func() { _, _ = hvsNoE.AddVote(voteExt, "peer1", true) }) - } func makeVoteHR( - t *testing.T, height int64, valIndex, round int32, diff --git a/consensus/wal.go b/consensus/wal.go index 82ab330d8b1..2a17da1d441 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -89,7 +89,7 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped. func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { - err := cmtos.EnsureDir(filepath.Dir(walFile), 0700) + err := cmtos.EnsureDir(filepath.Dir(walFile), 0o700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) } @@ -230,7 +230,8 @@ type WALSearchOptions struct { // CONTRACT: caller must close group reader. 
func (wal *BaseWAL) SearchForEndHeight( height int64, - options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + options *WALSearchOptions, +) (rd io.ReadCloser, found bool, err error) { var ( msg *TimedWALMessage gr *auto.GroupReader @@ -400,7 +401,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)} } - var res = new(cmtcons.TimedWALMessage) + res := new(cmtcons.TimedWALMessage) err = proto.Unmarshal(data, res) if err != nil { return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} @@ -422,10 +423,10 @@ type nilWAL struct{} var _ WAL = nilWAL{} -func (nilWAL) Write(m WALMessage) error { return nil } -func (nilWAL) WriteSync(m WALMessage) error { return nil } -func (nilWAL) FlushAndSync() error { return nil } -func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { +func (nilWAL) Write(WALMessage) error { return nil } +func (nilWAL) WriteSync(WALMessage) error { return nil } +func (nilWAL) FlushAndSync() error { return nil } +func (nilWAL) SearchForEndHeight(int64, *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } func (nilWAL) Start() error { return nil } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 56abdfb359b..c44e9f68ad0 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -217,8 +217,8 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) error { func (w *byteBufferWAL) FlushAndSync() error { return nil } func (w *byteBufferWAL) SearchForEndHeight( - height int64, - options *WALSearchOptions, + int64, + *WALSearchOptions, ) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } diff --git a/crypto/README.md b/crypto/README.md index 507239a4d8c..d63513bbf46 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -12,11 +12,11 @@ For any specific algorithm, use its specific module e.g. ## Binary encoding -For Binary encoding, please refer to the [CometBFT encoding specification](https://github.com/cometbft/cometbft/blob/main/spec/core/encoding.md). +For Binary encoding, please refer to the [CometBFT encoding specification](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/encoding.md). ## JSON Encoding -JSON encoding is done using CometBFT's internal json encoder. For more information on JSON encoding, please refer to [CometBFT JSON encoding](https://github.com/cometbft/cometbft/blob/main/libs/json/doc.go) +JSON encoding is done using CometBFT's internal json encoder. For more information on JSON encoding, please refer to [CometBFT JSON encoding](https://github.com/cometbft/cometbft/blob/v0.38.x/libs/json/doc.go) ```go Example JSON encodings: diff --git a/crypto/batch/batch.go b/crypto/batch/batch.go index 7587bc711ab..530caafdab7 100644 --- a/crypto/batch/batch.go +++ b/crypto/batch/batch.go @@ -23,6 +23,9 @@ func CreateBatchVerifier(pk crypto.PubKey) (crypto.BatchVerifier, bool) { // SupportsBatchVerifier checks if a key type implements the batch verifier // interface. 
func SupportsBatchVerifier(pk crypto.PubKey) bool { + if pk == nil { + return false + } switch pk.Type() { case ed25519.KeyType, sr25519.KeyType: return true diff --git a/crypto/merkle/bench_test.go b/crypto/merkle/bench_test.go new file mode 100644 index 00000000000..0520bd23893 --- /dev/null +++ b/crypto/merkle/bench_test.go @@ -0,0 +1,42 @@ +package merkle + +import ( + "crypto/sha256" + "strings" + "testing" +) + +var sink any + +type innerHashTest struct { + left, right string +} + +var innerHashTests = []*innerHashTest{ + {"aaaaaaaaaaaaaaa", " "}, + {"", ""}, + {" ", "a ff b f1 a"}, + {"ffff122fff", "ffff122fff"}, + {"😎💡✅alalalalalalalalalallalallaallalaallalalalalalalalaallalalalalalala", "😎💡✅alalalalalalalalalallalallaallalaallalalalalalalalaallalalalalalalaffff122fff"}, + {strings.Repeat("ff", 1<<10), strings.Repeat("00af", 4<<10)}, + {strings.Repeat("f", sha256.Size), strings.Repeat("00af", 10<<10)}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaffff122fffaaaaaaaaa", "aaaaaaaaaffff1aaaaaaaaaaaaaaaaaa22fffaaaaaaaaa"}, +} + +func BenchmarkInnerHash(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tt := range innerHashTests { + got := innerHash([]byte(tt.left), []byte(tt.right)) + if g, w := len(got), sha256.Size; g != w { + b.Fatalf("size discrepancy: got %d, want %d", g, w) + } + sink = got + } + } + + if sink == nil { + b.Fatal("Benchmark did not run!") + } +} diff --git a/crypto/merkle/hash.go b/crypto/merkle/hash.go index be2010aefcc..9e149410390 100644 --- a/crypto/merkle/hash.go +++ b/crypto/merkle/hash.go @@ -32,11 +32,7 @@ func leafHashOpt(s hash.Hash, leaf []byte) []byte { // returns tmhash(0x01 || left || right) func innerHash(left []byte, right []byte) []byte { - data := make([]byte, len(innerPrefix)+len(left)+len(right)) - n := copy(data, innerPrefix) - n += copy(data[n:], left) - copy(data[n:], right) - return tmhash.Sum(data) + return tmhash.SumMany(innerPrefix, left, right) } func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte { diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 25defd5bb55..2c53abf3d40 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -24,10 +24,10 @@ const ( // everything. This also affects the generalized proof system as // well. type Proof struct { - Total int64 `json:"total"` // Total number of items. - Index int64 `json:"index"` // Index of item to prove. - LeafHash []byte `json:"leaf_hash"` // Hash of item value. - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int64 `json:"total"` // Total number of items. + Index int64 `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts,omitempty"` // Hashes from leaf's sibling to a root's child. } // ProofsFromByteSlices computes inclusion proof for given items. @@ -50,6 +50,9 @@ func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) { // Verify that the Proof proves the root hash. 
// Check sp.Index/sp.Total manually if needed func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { + if rootHash == nil { + return fmt.Errorf("invalid root hash: cannot be nil") + } if sp.Total < 0 { return errors.New("proof total must be positive") } @@ -60,15 +63,27 @@ func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { if !bytes.Equal(sp.LeafHash, leafHash) { return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) } - computedHash := sp.ComputeRootHash() + computedHash, err := sp.computeRootHash() + if err != nil { + return fmt.Errorf("compute root hash: %w", err) + } if !bytes.Equal(computedHash, rootHash) { return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) } return nil } -// Compute the root hash given a leaf hash. Does not verify the result. +// Compute the root hash given a leaf hash. Panics in case of errors. func (sp *Proof) ComputeRootHash() []byte { + computedHash, err := sp.computeRootHash() + if err != nil { + panic(fmt.Errorf("ComputeRootHash errored %w", err)) + } + return computedHash +} + +// Compute the root hash given a leaf hash. +func (sp *Proof) computeRootHash() ([]byte, error) { return computeHashFromAunts( sp.Index, sp.Total, @@ -148,35 +163,36 @@ func ProofFromProto(pb *cmtcrypto.Proof) (*Proof, error) { // Use the leafHash and innerHashes to get the root merkle hash. // If the length of the innerHashes slice isn't exactly correct, the result is nil. // Recursive impl. -func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) []byte { +func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) ([]byte, error) { if index >= total || index < 0 || total <= 0 { - return nil + return nil, fmt.Errorf("invalid index %d and/or total %d", index, total) } switch total { case 0: panic("Cannot call computeHashFromAunts() with 0 total") case 1: if len(innerHashes) != 0 { - return nil + return nil, fmt.Errorf("unexpected inner hashes") } - return leafHash + return leafHash, nil default: if len(innerHashes) == 0 { - return nil + return nil, fmt.Errorf("expected at least one inner hash") } numLeft := getSplitPoint(total) if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil + leftHash, err := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err } - return innerHash(leftHash, innerHashes[len(innerHashes)-1]) + + return innerHash(leftHash, innerHashes[len(innerHashes)-1]), nil } - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil + rightHash, err := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err } - return innerHash(innerHashes[len(innerHashes)-1], rightHash) + return innerHash(innerHashes[len(innerHashes)-1], rightHash), nil } } diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index dc023aff3e2..f307380aad6 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -1,6 +1,7 @@ package merkle import ( + "bytes" "errors" "fmt" "testing" @@ -8,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/cometbft/cometbft/crypto/tmhash" cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" ) @@ -198,3 +200,26 @@ func 
TestVoteProtobuf(t *testing.T) { } } } + +// TestVsa2022_100 verifies https://blog.verichains.io/p/vsa-2022-100-tendermint-forging-membership-proof +func TestVsa2022_100(t *testing.T) { + // a fake key-value pair and its hash + key := []byte{0x13} + value := []byte{0x37} + vhash := tmhash.Sum(value) + bz := new(bytes.Buffer) + _ = encodeByteSlice(bz, key) + _ = encodeByteSlice(bz, vhash) + kvhash := tmhash.Sum(append([]byte{0}, bz.Bytes()...)) + + // the malicious `op` + op := NewValueOp( + key, + &Proof{LeafHash: kvhash}, + ) + + // the nil root + var root []byte + + assert.NotNil(t, ProofOperators{op}.Verify(root, "/"+string(key), [][]byte{value})) +} diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index 7c267e7efb8..5cc188cbe90 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -93,8 +93,12 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { return nil, fmt.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) } + rootHash, err := op.Proof.computeRootHash() + if err != nil { + return nil, err + } return [][]byte{ - op.Proof.ComputeRootHash(), + rootHash, }, nil } diff --git a/crypto/merkle/tree_test.go b/crypto/merkle/tree_test.go index 72f1402d657..df76360efc0 100644 --- a/crypto/merkle/tree_test.go +++ b/crypto/merkle/tree_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" cmtrand "github.com/cometbft/cometbft/libs/rand" - . "github.com/cometbft/cometbft/libs/test" + "github.com/cometbft/cometbft/libs/test" "github.com/cometbft/cometbft/crypto/tmhash" ) @@ -92,11 +92,11 @@ func TestProof(t *testing.T) { proof.Aunts = origAunts // Mutating the itemHash should make it fail. - err = proof.Verify(rootHash, MutateByteSlice(item)) + err = proof.Verify(rootHash, test.MutateByteSlice(item)) require.Error(t, err, "Expected verification to fail for mutated leaf hash") // Mutating the rootHash should make it fail. 
- err = proof.Verify(MutateByteSlice(rootHash), item) + err = proof.Verify(test.MutateByteSlice(rootHash), item) require.Error(t, err, "Expected verification to fail for mutated root hash") } } diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index c6e31f1e794..51d462c0bed 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -128,10 +128,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey { func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { priv, _ := secp256k1.PrivKeyFromBytes(privKey) - sig, err := ecdsa.SignCompact(priv, crypto.Sha256(msg), false) - if err != nil { - return nil, err - } + sum := sha256.Sum256(msg) + sig := ecdsa.SignCompact(priv, sum[:], false) // remove the first byte which is compactSigRecoveryCode return sig[1:], nil diff --git a/crypto/tmhash/bench_test.go b/crypto/tmhash/bench_test.go new file mode 100644 index 00000000000..1165ec9fdd7 --- /dev/null +++ b/crypto/tmhash/bench_test.go @@ -0,0 +1,52 @@ +package tmhash + +import ( + "bytes" + "crypto/sha256" + "strings" + "testing" +) + +var sink any + +var manySlices = []struct { + name string + in [][]byte + want [32]byte +}{ + { + name: "all empty", + in: [][]byte{[]byte(""), []byte("")}, + want: sha256.Sum256(nil), + }, + { + name: "ax6", + in: [][]byte{[]byte("aaaa"), []byte("😎"), []byte("aaaa")}, + want: sha256.Sum256([]byte("aaaa😎aaaa")), + }, + { + name: "composite joined", + in: [][]byte{bytes.Repeat([]byte("a"), 1<<10), []byte("AA"), bytes.Repeat([]byte("z"), 100)}, + want: sha256.Sum256([]byte(strings.Repeat("a", 1<<10) + "AA" + strings.Repeat("z", 100))), + }, +} + +func BenchmarkSHA256Many(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tt := range manySlices { + got := SumMany(tt.in[0], tt.in[1:]...) + if !bytes.Equal(got, tt.want[:]) { + b.Fatalf("Outward checksum mismatch for %q\n\tGot: %x\n\tWant: %x", tt.name, got, tt.want) + } + sink = got + } + } + + if sink == nil { + b.Fatal("Benchmark did not run!") + } + + sink = nil +} diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go index f9b9582420d..fbfcf5d9564 100644 --- a/crypto/tmhash/hash.go +++ b/crypto/tmhash/hash.go @@ -21,6 +21,18 @@ func Sum(bz []byte) []byte { return h[:] } +// SumMany takes at least 1 byteslice along with a variadic +// number of other byteslices and produces the SHA256 sum from +// hashing them as if they were 1 joined slice. +func SumMany(data []byte, rest ...[]byte) []byte { + h := sha256.New() + h.Write(data) + for _, data := range rest { + h.Write(data) + } + return h.Sum(nil) +} + //------------------------------------------------------------- const ( diff --git a/docker-compose.yml b/docker-compose.yml index ee582871976..f6c37165abc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -63,4 +63,4 @@ networks: ipam: driver: default config: - - subnet: 192.167.10.0/16 + - subnet: 192.167.0.0/16 diff --git a/docs/README.md b/docs/README.md index c13ca2f4ca7..b0353b99b4f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -16,11 +16,11 @@ CometBFT serves blockchain applications. More formally, CometBFT performs Byzantine Fault Tolerant (BFT) State Machine Replication (SMR) for arbitrary deterministic, finite state machines. -For more background, see [What is CometBFT?](introduction/README.md#what-is-cometbft.md). +For more background, see [What is CometBFT?](./introduction/README.md#what-is-cometbft). To get started quickly with an example application, see the [quick start guide](guides/quick-start.md). 
-To learn about application development on CometBFT, see the [Application Blockchain Interface](https://github.com/cometbft/cometbft/tree/main/spec/abci). +To learn about application development on CometBFT, see the [Application Blockchain Interface](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/abci). For more details on using CometBFT, see the respective documentation for [CometBFT internals](core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 17aa4f2bf2e..829a9a6cb52 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -1,5 +1,5 @@ --- -order: 2 +order: 3 --- # Using ABCI-CLI @@ -62,51 +62,10 @@ The most important messages are `deliver_tx`, `check_tx`, and `commit`, but there are others for convenience, configuration, and information purposes. -We'll start a kvstore application, which was installed at the same time -as `abci-cli` above. The kvstore just stores transactions in a merkle -tree. Its code can be found -[here](https://github.com/cometbft/cometbft/blob/main/abci/cmd/abci-cli/abci-cli.go) -and looks like the following: - -```go -func cmdKVStore(cmd *cobra.Command, args []string) error { - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - - // Create the application - in memory or persisted to disk - var app types.Application - if flagPersist == "" { - var err error - flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp") - if err != nil { - return err - } - } - app = kvstore.NewPersistentKVStoreApplication(flagPersist) - app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) - - // Start the listener - srv, err := server.NewServer(flagAddress, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } - - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - if err := srv.Stop(); err != nil { - logger.Error("Error while stopping server", "err", err) - } - }) - - // Run forever. - select {} -} - -``` +We'll start a kvstore application, which was installed at the same time as +`abci-cli` above. The kvstore just stores transactions in a Merkle tree. Its +code can be found +[here](https://github.com/cometbft/cometbft/blob/v0.38.x/abci/example/kvstore/kvstore.go). Start the application by running: @@ -146,7 +105,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in -Golang](https://github.com/cometbft/cometbft/tree/main/abci/server). See the +Golang](https://github.com/cometbft/cometbft/tree/v0.38.x/abci/server). See the [list of other ABCI implementations](https://github.com/tendermint/awesome#ecosystem) for servers in other languages. @@ -240,7 +199,7 @@ You could put the commands in a file and run Note that the `abci-cli` is designed strictly for testing and debugging. In a real deployment, the role of sending messages is taken by CometBFT, which -connects to the app using three separate connections, each with its own +connects to the app using four separate connections, each with its own pattern of messages. 
For examples of running an ABCI app with CometBFT, see the diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md index 97ebf502cf2..cf0cc53b4af 100644 --- a/docs/app-dev/app-architecture.md +++ b/docs/app-dev/app-architecture.md @@ -1,5 +1,5 @@ --- -order: 3 +order: 4 --- # Application Architecture Guide @@ -50,6 +50,6 @@ CometBFT. See the following for more extensive documentation: - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1617) (legacy/deprecated) -- [CometBFT RPC Docs](https://docs.cometbft.com/main/rpc/) +- [CometBFT RPC Docs](../rpc) - [CometBFT in Production](../core/running-in-production.md) -- [ABCI spec](https://github.com/cometbft/cometbft/tree/main/spec/abci) +- [ABCI spec](../spec/abci) diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 94076287224..8a23cad3b85 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -1,5 +1,5 @@ --- -order: 1 +order: 2 --- # Getting Started @@ -10,9 +10,9 @@ As a general purpose blockchain engine, CometBFT is agnostic to the application you want to run. So, to run a complete blockchain that does something useful, you must start two programs: one is CometBFT, the other is your application, which can be written in any programming -language. Recall from [the intro to -ABCI](../introduction/what-is-cometbft.md#abci-overview) that CometBFT -handles all the p2p and consensus stuff, and just forwards transactions to the +language. + +CometBFT handles all the p2p and consensus logic, and just forwards transactions to the application when they need to be validated, or when they're ready to be executed and committed. @@ -92,7 +92,7 @@ abci-cli kvstore In another terminal, we can start CometBFT. You should already have the CometBFT binary installed. If not, follow the steps from -[here](../introduction/install.md). If you have never run CometBFT +[here](../guides/install.md). If you have never run CometBFT before, use: ```sh diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index d6ee4e0ceb7..f20f173e993 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -1,5 +1,5 @@ --- -order: 6 +order: 5 --- # Indexing Transactions @@ -14,11 +14,7 @@ the block itself is never stored. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more -details on `Events`, see the - -[ABCI](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_basic_concepts.md#events) - -documentation. +details on `Events`, see the [ABCI][abci-events] documentation. An `Event` has a composite key associated with it. A `compositeKey` is constructed by its type and key separated by a dot. @@ -36,7 +32,7 @@ would be equal to the composite key of `jack.account.number`. By default, CometBFT will index all transactions by their respective hashes and height and blocks by their height. -CometBFT allows for different events within the same height to have +CometBFT allows for different events within the same height to have equal attributes. ## Configuration @@ -74,8 +70,9 @@ entirely in the future. 
**Implementation and data layout** -The kv indexer stores each attribute of an event individually, by creating a composite key +The kv indexer stores each attribute of an event individually, by creating a composite key with + - event type, - attribute key, - attribute value, @@ -83,7 +80,7 @@ with - the height, and - event counter. For example, the following events: - + ``` Type: "transfer", Attributes: []abci.EventAttribute{ @@ -92,9 +89,9 @@ Type: "transfer", {Key: "balance", Value: "100", Index: true}, {Key: "note", Value: "nothing", Index: true}, }, - + + ``` - + + ``` Type: "transfer", Attributes: []abci.EventAttribute{ @@ -105,7 +102,7 @@ Type: "transfer", }, ``` -will be represented as follows in the store, assuming these events result from the `FinalizeBlock` call for height 1: +will be represented as follows in the store, assuming these events result from the `FinalizeBlock` call for height 1: ``` Key value @@ -119,12 +116,13 @@ transferSenderTomFinalizeBlock12 1 transferRecepientAliceFinalizeBlock12 1 transferBalance200FinalizeBlock12 1 transferNoteNothingFinalizeBlock12 1 - + + -The event number is a local variable kept by the indexer and incremented when a new event is processed. -It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height. +The event number is a local variable kept by the indexer and incremented when a new event is processed. +It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height. This variable is not atomically incremented as event indexing is deterministic. **Should this ever change**, the event id generation -will be broken. +will be broken. #### PostgreSQL @@ -236,7 +234,7 @@ You can query for a paginated set of transactions by their events by calling the curl "localhost:26657/tx_search?query=\"message.sender='cosmos1...'\"&prove=true" ``` -Check out [API docs](https://docs.cometbft.com/main/rpc/#/Info/tx_search) +Check out [API docs](https://docs.cometbft.com/v0.38/rpc/#/Info/tx_search) for more information on query syntax and other options. ## Subscribing to Transactions @@ -255,7 +253,7 @@ a query to the `/subscribe` RPC endpoint. } ``` -Check out [API docs](https://docs.cometbft.com/main/rpc/#subscribe) for more information +Check out [API docs](https://docs.cometbft.com/v0.38/rpc/#subscribe) for more information on query syntax and other options. ## Querying Block Events @@ -264,15 +262,44 @@ You can query for a paginated set of blocks by their events by calling the `/block_search` RPC endpoint: ```bash -curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_changed > 0\"" +curl "localhost:26657/block_search?query=\"block.height > 10\"" ``` -Storing the event sequence was introduced in CometBFT 0.34.26. Before that, up until Tendermint Core 0.34.26, -the event sequence was not stored in the kvstore and events were stored only by height. That means that queries -returned blocks and transactions whose event attributes match within the height but can match across different -events on that height. -This behavior was fixed with CometBFT 0.34.26+. However, if the data was indexed with earlier versions of -Tendermint Core and not re-indexed, that data will be queried as if all the attributes within a height -occurred within the same event. +Storing the event sequence was introduced in CometBFT 0.34.26.
Before that, up +until Tendermint Core 0.34.26, the event sequence was not stored in the kvstore +and events were stored only by height. That means that queries returned blocks +and transactions whose event attributes match within the height but can match +across different events on that height. + +This behavior was fixed with CometBFT 0.34.26+. However, if the data was +indexed with earlier versions of Tendermint Core and not re-indexed, that data +will be queried as if all the attributes within a height occurred within the +same event. + +## Event attribute value types + +Users can use anything as an event value. However, if the event attribute value +is a number, the following needs to be taken into account: + +- Negative numbers will not be properly retrieved when querying the indexer. +- Event values are converted to big floats (from the `math/big` package). The + precision of the floating point number is set to the bit length of the + integer it is supposed to represent, so that there is no loss of information + due to insufficient precision. This was not present before CometBFT v0.38.x + and all float values were ignored. +- As of CometBFT v0.38.x, queries can contain floating point numbers as well. +- Note that comparing to floats can be imprecise with a high number of decimals. + +## Event type and attribute key format + +An event type/attribute key is a string that can contain any Unicode letter or +digit, as well as the following characters: `.` (dot), `-` (dash), `_` +(underscore). The event type/attribute key must not start with `-` (dash) or +`.` (dot). + +``` +^[\w]+[\w.\-]*$ +``` +[abci-events]: ../spec/abci/abci++_basic_concepts.md#events diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 2dde6df4597..761c68274df 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -44,6 +44,8 @@ numbering our ADRs from 100 onwards. ### Accepted +- [ADR-111: `nop` Mempool](./adr-111-nop-mempool.md) + ### Implemented ### Deprecated diff --git a/docs/architecture/adr-111-nop-mempool.md b/docs/architecture/adr-111-nop-mempool.md new file mode 100644 index 00000000000..234cd5b9c1d --- /dev/null +++ b/docs/architecture/adr-111-nop-mempool.md @@ -0,0 +1,324 @@ +# ADR 111: `nop` Mempool + +## Changelog + +- 2023-11-07: First version (@sergio-mena) +- 2023-11-15: Addressed PR comments (@sergio-mena) +- 2023-11-17: Renamed `nil` to `nop` (@melekes) +- 2023-11-20: Mentioned that the app could reuse p2p network in the future (@melekes) +- 2023-11-22: Adapted ADR to implementation (@melekes) + +## Status + +Accepted + +[Tracking issue](https://github.com/cometbft/cometbft/issues/1666) + +## Context + +### Summary + +The current mempool built into CometBFT implements a robust yet somewhat inefficient transaction gossip mechanism. +While the CometBFT team is currently working on more efficient general-purpose transaction gossiping mechanisms, +some users have expressed their desire to manage both the mempool and the transaction dissemination mechanism +outside CometBFT (typically at the application level). + +This ADR proposes a fairly simple way for CometBFT to fulfill this use case without moving away from our current architecture. + +### In the Beginning... + +It is well understood that a dissemination mechanism +(sometimes using _Reliable Broadcast_ [\[HT94\]][HT94], but not necessarily) +is needed in a distributed system implementing State-Machine Replication (SMR). +This is also the case in blockchains.
+Early designs such as Bitcoin or Ethereum include an _internal_ component, +responsible for dissemination, called the mempool. +Tendermint Core chose to follow the same design given the success +of those early blockchains and, since inception, Tendermint Core and later CometBFT have featured a mempool as an internal piece of their architecture. + +However, the design of ABCI clearly dividing the application logic (i.e., the appchain) +and the consensus logic that provides SMR semantics to the app is a unique innovation in Cosmos +that sets it apart from Bitcoin, Ethereum, and many others. +This clear separation of concerns entailed many consequences, mostly positive: +it allows CometBFT to be used underneath (currently) tens of different appchains in production +in the Cosmos ecosystem and elsewhere. +But there are other implications of having an internal mempool +in CometBFT: the interaction between the mempool, the application, and the network +becomes more indirect, and thus more complex and harder to understand and operate. + +### ABCI++ Improvements and Remaining Shortcomings + +Before the release of ABCI++, `CheckTx` was the main mechanism the app had at its disposal to influence +what transactions made it to the mempool, and very indirectly what transactions got ultimately proposed in a block. +Since ABCI 1.0 (the first part of ABCI++, shipped in `v0.37.x`), the application has +a more direct say in what is proposed through `PrepareProposal` and `ProcessProposal`. + +This has greatly improved the ability for appchains to influence the contents of the proposed block. +Further, ABCI++ has enabled many new use cases for appchains. However, some issues remain with +the current model: + +* We are using the same P2P network for disseminating transactions and consensus-related messages. +* Many mempool parameters are configured on a per-node basis by node operators, + allowing the possibility of inconsistent mempool configuration across the network + with potentially serious scalability effects + (even causing unacceptable performance degradation in some extreme cases). +* The current mempool implementation uses a basic (robust but sub-optimal) flood algorithm + * the CometBFT team is working on improving it as one of our current priorities, + but any improvement we come up with must address the needs of a vast spectrum of applications, + as well as be heavily scale-tested in various scenarios + (in an attempt to cover the applications' wide spectrum) + * a mempool designed specifically for one particular application + would reduce the search space, as its designers can devise it with just their application's + needs in mind. +* The interaction with the application is still somewhat convoluted: + * the application has to decide what logic to implement in `CheckTx`, + what to do with the transaction list coming in `RequestPrepareProposal`, + whether it wants to maintain an app-side mempool (more on this below), and whether or not + to combine the transactions in the app-side mempool with those coming in `RequestPrepareProposal` + * all those combinations are hard to fully understand, as the semantics and guarantees are + often not clear + * when using exclusively an app-mempool (the approach taken in the Cosmos SDK `v0.47.x`) + for populating proposed blocks, with the aim of simplifying the app developers' life, + we still have a suboptimal model where we need to continue using CometBFT's mempool + in order to disseminate the transactions.
So, we end up using twice as much memory, + as in-transit transactions need to be kept in both mempools. + +The approach presented in this ADR builds on the app-mempool design released in `v0.47.x` +of the Cosmos SDK, +and briefly discussed in the last bullet point above (see [SDK app-mempool][sdk-app-mempool] for further details of this model). + +In the app-mempool design in Cosmos SDK `v0.47.x`, +an unconfirmed transaction must be both in CometBFT's mempool for dissemination and +in the app's mempool so the application can decide how to manage the mempool. +There is no doubt that this approach has numerous advantages. However, it also has some implications that need to be considered: + +* Having every transaction both in CometBFT and in the application is suboptimal in terms of memory. + Additionally, the app developer has to be careful + that the contents of both mempools do not diverge over time + (hence the crucial role `re-CheckTx` plays post-ABCI++). +* The main reason for a transaction needing to be in CometBFT's mempool is + that the design in Cosmos SDK `v0.47.x` does not consider an application + that has its own means of disseminating transactions. + It reuses the peer-to-peer network underneath CometBFT's reactors. +* There is no point in having transactions in CometBFT's mempool if an application implements an ad-hoc design for disseminating transactions. + +This proposal targets applications of this kind: +those that have an ad-hoc mechanism for transaction dissemination that better meets the application's requirements. + +The ABCI application could reuse the P2P network once this is exposed via ABCI. +But this will take some time as it needs to be implemented, and has a dependency +on bi-directional ABCI, which is also quite substantial. See discussions +[1](https://github.com/cometbft/cometbft/discussions/1112) and +[2](https://github.com/cometbft/cometbft/discussions/494). + +We propose to introduce a `nop` (short for no operation) mempool, which will effectively act as a stubbed object +internally: + +* it will reject any transaction being locally submitted or gossiped by a peer +* when a _reap_ (as it is currently called) is executed in the mempool, an empty answer will always be returned +* the application running on the proposer validator will add transactions it received + using the appchain's own mechanism via `PrepareProposal`. + +## Alternative Approaches + +These are the alternatives known to date: + +1. Keep the current model. Useful for basic apps, but clearly suboptimal for applications + with their own mechanism to disseminate transactions and particular performance requirements. +2. Provide more efficient general-purpose mempool implementations. + This is an ongoing effort (e.g., [CAT mempool][cat-mempool]), but it will take time and R&D effort to come up with + advanced mechanisms -- likely highly configurable and thus complex -- which then will have to be thoroughly tested. +3. A similar approach to this one ([ADR110][adr-110]) whereby the application-specific + mechanism directly interacts with CometBFT via a newly defined gRPC interface. +4. Partially adopting this ADR. There are several possibilities: + * Use the current mempool, disable transaction broadcast in `config.toml`, and accept transactions from users via `BroadcastTx*` RPC methods. + Positive: avoids transaction gossiping; app can reuse the mempool existing in CometBFT. + Negative: requires clients to know the validators' RPC endpoints (potential security issues).
+ * Disable transaction broadcast in `config.toml`, and have the application always reject transactions in `CheckTx`. + Positive: effectively disables the mempool; does not require modifications to CometBFT (may be used in `v0.37.x` and `v0.38.x`). + Negative: requires apps to disseminate txs themselves; the setup for this is less straightforward than this ADR's proposal. + +## Decision + +TBD + +## Detailed Design + +What this ADR proposes can already be achieved with an unmodified CometBFT since +`v0.37.x`, albeit with a complex, poor UX (see the last alternative in section +[Alternative Approaches](#alternative-approaches)). The core of this proposal +is to make some internal changes so it is clear and simple for app developers, +thus improving the UX. + +### `nop` Mempool + +We propose a new mempool implementation, called `nop` Mempool, that effectively disables all mempool functionality +within CometBFT. +The `nop` Mempool implements the `Mempool` interface in a very simple manner: + +* `CheckTx(tx types.Tx) (*abcicli.ReqRes, error)`: returns `nil, ErrNotAllowed` +* `RemoveTxByKey(txKey types.TxKey) error`: returns `ErrNotAllowed` +* `ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs`: returns `nil` +* `ReapMaxTxs(max int) types.Txs`: returns `nil` +* `Lock()`: does nothing +* `Unlock()`: does nothing +* `Update(...) error`: returns `nil` +* `FlushAppConn() error`: returns `nil` +* `Flush()`: does nothing +* `TxsAvailable() <-chan struct{}`: returns `nil` +* `EnableTxsAvailable()`: does nothing +* `SetTxRemovedCallback(cb func(types.TxKey))`: does nothing +* `Size() int`: returns `0` +* `SizeBytes() int64`: returns `0` + +Upon startup, the `nop` mempool reactor will advertise no channels to the peer-to-peer layer. +A minimal, illustrative sketch of this stub is included in the appendix at the end of this ADR. + +### Configuration + +We propose the following changes to the `config.toml` file: + +```toml +[mempool] +# The type of mempool for this CometBFT node to use. +# +# Valid types of mempools supported by CometBFT: +# - "flood" : clist mempool with flooding gossip protocol (default) +# - "nop" : nop-mempool (app has implemented an alternative tx dissemination mechanism) +type = "nop" +``` + +The config validation logic will be modified to add a new rule that rejects a configuration file +if all of these conditions are met: + +* the mempool is set to `nop` +* `create_empty_blocks`, in the `consensus` section, is set to `false`. + +The reason for this extra validity rule is that the `nop` mempool, as proposed here, +does not support the "do not create empty blocks" functionality. +Here are some considerations on this: + +* The "do not create empty blocks" functionality + * entangles the consensus and mempool reactors + * is hardly used in production by real appchains (to the best of the CometBFT team's knowledge) + * its current implementation for the built-in mempool has undesired side-effects + * app hashes currently refer to the previous block, + * and thus it interferes with query provability. +* If needed in the future, this can be supported by extending ABCI, + but we will first need to see a real need for this before committing to changing ABCI + (which has other, higher-impact changes waiting to be prioritized). + +### RPC Calls + +There are no changes needed in the code dealing with RPC. Those RPC paths that call methods of the `Mempool` interface +will simply call the new implementation. + +### Impacted Workflows + +* *Submitting a transaction*. Users must not submit transactions via CometBFT's RPC.
+ `BroadcastTx*` RPC methods will fail with a reasonable error and the 501 status code. + The application running on a full node must offer an interface for users to submit new transactions. + It could also be a distinct node (or set of nodes) in the network. + These considerations are exclusively the application's concern in this approach. +* *Time to propose a block*. The consensus reactor will call `ReapMaxBytesMaxGas`, which will return a `nil` slice. + `RequestPrepareProposal` will thus contain no transactions. +* *Consensus waiting for transactions to become available*. `TxsAvailable()` returns `nil`. + `cs.handleTxsAvailable()` won't ever be executed. + At any rate, a configuration with the `nop` mempool and `create_empty_blocks` set to `false` + will be rejected in the first place. +* *A new block is decided*. + * When `Update` is called, nothing is done (no decided transaction is removed). + * Locking and unlocking the mempool has no effect. +* *ABCI mempool's connection*. + CometBFT will still open a "mempool" connection, even though it won't be used. + This is to avoid making many breaking changes. + +### Impact on Current Release Plans + +The changes needed for this approach are fairly simple, and the logic is clear. +This might allow us to deliver it as part of CometBFT `v1` (our next release), +even without a noticeable impact on `v1`'s delivery schedule. + +The CometBFT team (learning from past dramatic events) usually takes a conservative approach +to backporting changes to release branches that have already undergone a full QA cycle +(and thus are in code-freeze mode). +For this reason, although the limited scope of these changes would reduce the risks +of backporting to `v0.38.x` and `v0.37.x`, a careful risk/benefit evaluation will +have to be carried out. + +Backporting to `v0.34.x` does not make sense, as this version predates the release of `ABCI 1.0`, +so using the `nop` mempool would render CometBFT's operation useless. + +### Config parameter _vs._ application-enforced parameter + +In the current proposal, the parameter selecting the mempool is in `config.toml`. +However, this is not a clear-cut decision. These are the alternatives we see: + +* *Mempool selected in `config.toml` (our current design)*. + This is the way the mempool has always been selected in Tendermint Core and CometBFT, + in those versions where there was more than one mempool to choose from. + As the configuration is in `config.toml`, it is up to the node operators to configure their + nodes consistently, via social consensus. However, this cannot be guaranteed. + A network with an inconsistent choice of mempool at different nodes might + result in undesirable side effects, such as peers disconnecting from nodes + that sent them messages via the mempool channel. +* *Mempool selected as a network-wide parameter*. + A way to prevent any inconsistency when selecting the mempool is to move the configuration out of `config.toml` + and have it as a network-wide application-enforced parameter, implemented in the same way as Consensus Params. + The Cosmos community may not be ready for such a rigid, radical change, + even if it eliminates the risk of operators shooting themselves in the foot. + Hence we currently favor the previous alternative. +* *Mempool selected as a network-wide parameter, but allowing override*.
+ A third option, halfway between the previous two, is to have the mempool selection + as a network-wide parameter, but with a special value called _local-config_ that still + allows an appchain to decide to leave it up to operators to configure it in `config.toml`. + +Ultimately, the "config parameter _vs._ application-enforced parameter" discussion +is a more general one that is applicable to other parameters not related to mempool selection. +In that sense, it is out of the scope of this ADR. + +## Consequences + +### Positive + +- Applications can now find mempool mechanisms that better fit their particular needs: + - Ad-hoc ways to add, remove, merge, reorder, modify, and prioritize transactions according + to application needs. + - A way to disseminate transactions (gossip-based or other) to get the submitted transactions + to proposers. The application developers can devise simpler, efficient mechanisms tailored + to their application. + - Back-pressure mechanisms to prevent malicious users from abusing the transaction + dissemination mechanism. +- In this approach, CometBFT's peer-to-peer layer is relieved of managing transaction gossip, freeing up its resources for other reactors such as consensus, evidence, block-sync, or state-sync. +- There is no risk of the operators of a network providing inconsistent configurations + for some mempool-related parameters. Some of those misconfigurations are known to have caused + serious performance issues in CometBFT's peer-to-peer network. + Unless, of course, the application-defined transaction dissemination mechanism ends up + allowing similar configuration inconsistencies. +- The interaction between the application and CometBFT at `PrepareProposal` time + is simplified. No transactions are ever provided by CometBFT, + and no transactions can ever be left in the mempool when CometBFT calls `PrepareProposal`: + the application trivially has all the information. +- UX is improved compared to how this could be done prior to this ADR. + +### Negative + +- With the `nop` mempool, it is up to the application to provide users with a way + to submit transactions and deliver those transactions to validators. + This is a considerable endeavor, and more basic appchains may consider it not worth the hassle. +- There is a risk of wasting resources on those nodes that have a misconfigured + mempool (bandwidth, CPU, memory, etc.): if TXs are submitted (incorrectly) + via CometBFT's RPC but never submitted (correctly, via an app-specific + interface) to the App, those TXs risk staying around until the node + is stopped. Moreover, those TXs will be reaped & proposed in every single block. + App developers will need to keep this in mind and panic on `CheckTx` or on + `PrepareProposal` with a non-empty list of transactions. +- Optimizing block proposals by only including transaction IDs (e.g., TX hashes) is more difficult. + The ABCI app could do it by submitting TX hashes (rather than TXs themselves) + in `PrepareProposal`, and then having a mechanism for pulling TXs from the + network upon `FinalizeBlock`.
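+
+## Appendix: Illustrative Sketch
+
+For illustration only, below is a minimal sketch of what the stub described in the
+[Detailed Design](#detailed-design) section could look like. It is not the final
+implementation: the package layout, import paths, the exact `Update` signature, and the
+`PreCheckFunc`/`PostCheckFunc` aliases are assumptions made to keep the example
+self-contained.
+
+```go
+package nop
+
+import (
+	"errors"
+
+	abcicli "github.com/cometbft/cometbft/abci/client"
+	abci "github.com/cometbft/cometbft/abci/types"
+	"github.com/cometbft/cometbft/types"
+)
+
+// ErrNotAllowed is returned by every operation that would normally accept or remove a transaction.
+var ErrNotAllowed = errors.New("not allowed with `nop` mempool")
+
+// Aliases standing in for the corresponding function types in CometBFT's mempool package.
+type (
+	PreCheckFunc  func(types.Tx) error
+	PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
+)
+
+// Mempool is a stub: it rejects all submissions and never holds any transactions.
+type Mempool struct{}
+
+// CheckTx rejects every transaction, whether submitted locally or gossiped by a peer.
+func (*Mempool) CheckTx(types.Tx) (*abcicli.ReqRes, error) { return nil, ErrNotAllowed }
+
+// RemoveTxByKey fails: there is never anything to remove.
+func (*Mempool) RemoveTxByKey(types.TxKey) error { return ErrNotAllowed }
+
+// Reaping always returns an empty answer, so RequestPrepareProposal carries no
+// transactions; the app adds its own transactions in PrepareProposal.
+func (*Mempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return nil }
+func (*Mempool) ReapMaxTxs(int) types.Txs                  { return nil }
+
+// Locking, updating after a decided block, and flushing are all no-ops.
+func (*Mempool) Lock()   {}
+func (*Mempool) Unlock() {}
+func (*Mempool) Update(int64, types.Txs, []*abci.ExecTxResult, PreCheckFunc, PostCheckFunc) error {
+	return nil
+}
+func (*Mempool) FlushAppConn() error { return nil }
+func (*Mempool) Flush()              {}
+
+// TxsAvailable returns a nil channel: receiving from it blocks forever, so consensus
+// is never notified of available transactions. This is consistent with rejecting
+// configurations that combine the `nop` mempool with `create_empty_blocks = false`.
+func (*Mempool) TxsAvailable() <-chan struct{}          { return nil }
+func (*Mempool) EnableTxsAvailable()                    {}
+func (*Mempool) SetTxRemovedCallback(func(types.TxKey)) {}
+
+// The stub is always empty.
+func (*Mempool) Size() int        { return 0 }
+func (*Mempool) SizeBytes() int64 { return 0 }
+```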
+
+[sdk-app-mempool]: https://docs.cosmos.network/v0.47/build/building-apps/app-mempool
+[adr-110]: https://github.com/cometbft/cometbft/pull/1565
+[HT94]: https://dl.acm.org/doi/book/10.5555/866693
+[cat-mempool]: https://github.com/cometbft/cometbft/pull/1472
\ No newline at end of file
diff --git a/docs/core/block-structure.md b/docs/core/block-structure.md
index a422aa9cd9a..e1e9175cb67 100644
--- a/docs/core/block-structure.md
+++ b/docs/core/block-structure.md
@@ -4,13 +4,16 @@ order: 8

# Block Structure

-The CometBFT consensus engine records all agreements by a
-supermajority of nodes into a blockchain, which is replicated among all
-nodes. This blockchain is accessible via various RPC endpoints, mainly
-`/block?height=` to get the full block, as well as
-`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what
-exactly is stored in these blocks?
+The CometBFT consensus engine records all agreements by a 2/3+ majority of nodes
+into a blockchain, which is replicated among all nodes. This blockchain is
+accessible via various RPC endpoints, mainly `/block?height=` to get the full
+block, as well as `/blockchain?minHeight=_&maxHeight=_` to get a list of
+headers. But what exactly is stored in these blocks?

-The [specification](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.
+The [specification][data_structures] contains a detailed description of each
+component - that's the best place to get started.

-To dig deeper, check out the [types package documentation](https://godoc.org/github.com/cometbft/cometbft/types).
+To dig deeper, check out the [types package documentation][types].
+
+[data_structures]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/data_structures.md
+[types]: https://pkg.go.dev/github.com/cometbft/cometbft/types
diff --git a/docs/core/block-sync.md b/docs/core/block-sync.md
index aac917180ed..ad2797d447f 100644
--- a/docs/core/block-sync.md
+++ b/docs/core/block-sync.md
@@ -25,7 +25,7 @@ process.
Once caught up, the daemon will switch out of Block Sync and into the normal
consensus mode. After running for some time, the node is considered `caught
up` if it has at least one peer and its height is at least as high as the max
reported peer height. See [the IsCaughtUp
-method](https://github.com/cometbft/cometbft/blob/main/blocksync/pool.go#L168).
+method](https://github.com/cometbft/cometbft/blob/v0.38.x/blocksync/pool.go#L168).

Note: While there have historically been multiple versions of blocksync, v0,
v1, and v2, all versions other than v0 have been deprecated in favor of the
simplest and most well understood algorithm.
diff --git a/docs/core/configuration.md b/docs/core/configuration.md
index 63c8e723ea8..8f5c6e04f90 100644
--- a/docs/core/configuration.md
+++ b/docs/core/configuration.md
@@ -25,6 +25,10 @@ like the file below, however, double check by inspecting the
# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable
# or --home cmd flag.

+# The version of the CometBFT binary that created or
+# last modified the config file. Do not modify this.
+version = "0.38.0" + ####################################################################### ### Main Base Config Options ### ####################################################################### @@ -37,7 +41,9 @@ proxy_app = "tcp://127.0.0.1:26658" moniker = "anonymous" # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# * goleveldb (github.com/syndtr/goleveldb) +# - UNMAINTAINED +# - stable # - pure go # - stable # * cleveldb (uses levigo wrapper) @@ -61,7 +67,7 @@ db_backend = "goleveldb" db_dir = "data" # Output level for logging, including package level options -log_level = "main:info,state:info,statesync:info,*:error" +log_level = "info" # Output format: 'plain' (colored text) or 'json' log_format = "plain" @@ -150,12 +156,49 @@ max_subscription_clients = 100 # the estimated # maximum number of broadcast_tx_commit calls per block. max_subscriptions_per_client = 5 +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + # How long to wait for a tx to be committed during /broadcast_tx_commit. # WARNING: Using a value larger than 10s will result in increasing the # global HTTP write timeout, which applies to all connections and endpoints. # See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "10s" +# Maximum number of requests that can be sent in a JSON-RPC batch request. +# Possible values: number greater than 0. +# If the number of requests sent in a JSON-RPC batch exceed the maximum batch +# size configured, an error will be returned. +# The default value is set to `10`, which will limit the number of requests +# to 10 requests per a JSON-RPC batch request. +# If you don't want to enforce a maximum number of requests for a batch +# request set this value to `0`. +max_request_batch_size = 10 + # Maximum size of request body, in bytes max_body_bytes = 1000000 @@ -173,7 +216,7 @@ tls_cert_file = "" # The path to a file containing matching private key that is used to create the HTTPS server. # Might be either absolute path or path related to CometBFT's config directory. 
-# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server.
+# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create an HTTPS server.
# Otherwise, HTTP server is run.
tls_key_file = ""

@@ -188,10 +231,9 @@ pprof_laddr = ""
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"

-# Address to advertise to peers for them to dial
-# If empty, will use the same port as the laddr,
-# and will introspect on the listener or use UPnP
-# to figure out the address.
+# Address to advertise to peers for them to dial. If empty, will use the same
+# port as the laddr, and will introspect on the listener to figure out the
+# address. IP and port are required. Example: 159.89.10.97:26656
external_address = ""

# Comma separated list of seed nodes to connect to
@@ -200,9 +242,6 @@ seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""

-# UPNP port forwarding
-upnp = false
-
# Path to address book
addr_book_file = "config/addrbook.json"

@@ -254,12 +293,38 @@ handshake_timeout = "20s"
dial_timeout = "3s"

#######################################################
-###          Mempool Configurattion Option          ###
+###           Mempool Configuration Option          ###
#######################################################
[mempool]

+# The type of mempool for this node to use.
+#
+# Possible types:
+# - "flood" : concurrent linked list mempool with flooding gossip protocol
+#   (default)
+# - "nop"   : nop-mempool (short for no operation; the ABCI app is responsible
+#   for storing, disseminating and proposing txs). "create_empty_blocks=false" is
+#   not supported.
+type = "flood"
+
+# Recheck (default: true) defines whether CometBFT should recheck the
+# validity of all remaining transactions in the mempool after a block.
+# Since a block affects the application state, some transactions in the
+# mempool may become invalid. If this does not apply to your application,
+# you can disable rechecking.
recheck = true
+
+# Broadcast (default: true) defines whether the mempool should relay
+# transactions to other peers. Setting this to false will stop the mempool
+# from relaying transactions to other peers until they are included in a
+# block. In other words, if Broadcast is disabled, only the peer you send
+# the tx to will see it until it is included in a block.
broadcast = true
+
+# WalPath (default: "") configures the location of the Write Ahead Log
+# (WAL) for the mempool. The WAL is disabled by default. To enable, set
+# wal_dir to where you want the WAL to be written (e.g.
+# "data/mempool.wal").
wal_dir = ""

# Maximum number of transactions in the mempool
@@ -285,7 +350,7 @@ max_tx_bytes = 1048576
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
-max_batch_bytes = 10485760
+max_batch_bytes = 0

#######################################################
###         State Sync Configuration Options        ###
#######################################################
@@ -307,19 +372,29 @@ enable = false
rpc_servers = ""
trust_height = 0
trust_hash = ""
-trust_period = "0s"
+trust_period = "168h0m0s"
+
+# Time to spend discovering snapshots before initiating a restore.
+discovery_time = "15s"

# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
# Will create a new, randomly named directory within, and remove it when done.
temp_dir = ""

+# The timeout duration before re-requesting a chunk, possibly from a different
+# peer (default: 1 minute).
+chunk_request_timeout = "10s"
+
+# The number of concurrent chunk fetchers to run (default: 1).
+chunk_fetchers = "4"
+
#######################################################
###       Block Sync Configuration Options          ###
#######################################################
[blocksync]

# Block Sync version to use:
-# 
+#
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
# Please use v0 instead.
#
@@ -367,6 +442,17 @@ create_empty_blocks_interval = "0s"
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"

+#######################################################
+###         Storage Configuration Options           ###
+#######################################################
+[storage]
+
+# Set to true to discard ABCI responses from the state store, which can save a
+# considerable amount of disk space. Set to false to ensure ABCI responses are
+# persisted. ABCI responses are required for /block_results RPC queries, and to
+# reindex events in the command-line tool.
+discard_abci_responses = false
+
#######################################################
###   Transaction Indexer Configuration Options     ###
#######################################################
@@ -381,8 +467,14 @@ peer_query_maj23_sleep_duration = "2s"
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#   - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
+# 3) "psql" - the indexer service backed by PostgreSQL.
+# When "kv" or "psql" is chosen, "tx.height" and "tx.hash" will always be indexed.
indexer = "kv"

+# The PostgreSQL connection configuration, the connection format:
+#   postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
+psql-conn = ""
+
#######################################################
###       Instrumentation Configuration Options     ###
#######################################################
@@ -405,42 +497,26 @@ max_open_connections = 3

# Instrumentation namespace
namespace = "cometbft"
```

## Empty blocks VS no empty blocks

### create_empty_blocks = true

-If `create_empty_blocks` is set to `true` in your config, blocks will be
-created ~ every second (with default consensus parameters). You can regulate
-the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks.
+If `create_empty_blocks` is set to `true` in your config, blocks will be created ~ every second (with default consensus parameters). You can regulate the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks.

### create_empty_blocks = false

In this setting, blocks are created when transactions are received.

-Note after the block H, CometBFT creates something we call a "proof block"
-(only if the application hash changed) H+1. The reason for this is to support
-proofs. If you have a transaction in block H that changes the state to X, the
-new application hash will only be included in block H+1. If after your
-transaction is committed, you want to get a light-client proof for the new state
-(X), you need the new block to be committed in order to do that because the new
-block has the new application hash for the state X. That's why we make a new
-(empty) block if the application hash changes. Otherwise, you won't be able to
-make a proof for the new state.
-
-Plus, if you set `create_empty_blocks_interval` to something other than the
-default (`0`), CometBFT will be creating empty blocks even in the absence of
-transactions every `create_empty_blocks_interval`. For instance, with
-`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`,
-CometBFT will only create blocks if there are transactions, or after waiting
-30 seconds without receiving any transactions.
+Note that after block H, CometBFT creates something we call a "proof block" (only if the application hash changed) at height H+1. The reason for this is to support proofs. If you have a transaction in block H that changes the state to X, the new application hash will only be included in block H+1. If, after your transaction is committed, you want to get a light-client proof for the new state (X), you need the new block to be committed in order to do that, because the new block has the new application hash for the state X. That's why we make a new (empty) block if the application hash changes. Otherwise, you won't be able to make a proof for the new state.
+
+Plus, if you set `create_empty_blocks_interval` to something other than the default (`0`), CometBFT will be creating empty blocks even in the absence of transactions every `create_empty_blocks_interval`. For instance, with `create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`, CometBFT will only create blocks if there are transactions, or after waiting 30 seconds without receiving any transactions.

## Consensus timeouts explained

There's a variety of information about timeouts in [Running in
production](./running-in-production.md#configuration-parameters).
-
You can also find a more detailed explanation in the paper describing
the Tendermint consensus algorithm, adopted by CometBFT: [The latest
gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
@@ -460,17 +536,85 @@ timeout_commit = "1s"

Note that in a successful round, the only timeout that we absolutely wait
for, no matter what, is `timeout_commit`.
-
Here's a brief summary of the timeouts:

-- `timeout_propose` = how long we wait for a proposal block before prevoting nil
+- `timeout_propose` = how long a validator should wait for a proposal block before prevoting nil
- `timeout_propose_delta` = how much `timeout_propose` increases with each round
-- `timeout_prevote` = how long we wait after receiving +2/3 prevotes for
+- `timeout_prevote` = how long a validator should wait after receiving +2/3 prevotes for
  anything (ie. not a single block or nil)
- `timeout_prevote_delta` = how much the `timeout_prevote` increases with each round
-- `timeout_precommit` = how long we wait after receiving +2/3 precommits for
+- `timeout_precommit` = how long a validator should wait after receiving +2/3 precommits for
  anything (ie. not a single block or nil)
- `timeout_precommit_delta` = how much the `timeout_precommit` increases with each round
-- `timeout_commit` = how long we wait after committing a block, before starting
+- `timeout_commit` = how long a validator should wait after committing a block, before starting
  on the new height (this gives us a chance to receive some more precommits,
  even though we already have +2/3)
+
+### The adverse effect of using inconsistent `timeout_propose` in a network
+
+Here's an interesting question. What happens if a particular validator sets a
+very small `timeout_propose` compared to the rest of the network?
+
+Imagine there are only two validators in your network: Alice and Bob. Bob sets
+`timeout_propose` to 0s. Alice uses the default value of 3s. Let's say they
+both have equal voting power. Given the proposer selection algorithm is a
+weighted round-robin, you may expect Alice and Bob to take turns proposing
+blocks, with a result like:
+
+```
+#1 block - Alice
+#2 block - Bob
+#3 block - Alice
+#4 block - Bob
+...
+```
+
+What happens in reality is, however, a little bit different:
+
+```
+#1 block - Bob
+#2 block - Bob
+#3 block - Bob
+#4 block - Bob
+```
+
+That's because Bob doesn't wait for a proposal from Alice (he prevotes `nil`).
+This leaves Alice no chance to commit a block. Note that every block Bob
+creates needs a vote from Alice to constitute 2/3+. Bob always gets one because
+Alice has `timeout_propose` set to 3s. Alice never gets one because Bob has it
+set to 0s.
+
+Imagine now there are ten geographically distributed validators. One of them
+(Bob) sets `timeout_propose` to 0s. The others have it set to 3s. Now, Bob
+won't be able to move at his own speed, because he still needs 2/3+ of the
+votes of the other validators, and it takes time to propagate those. I.e., the
+network moves at the speed it takes to accumulate 2/3+ of the votes (prevotes &
+precommits), not at the speed of the fastest proposer.
+
+> Isn't block production determined by voting power?
+
+If it were determined solely by voting power, it wouldn't be possible to ensure
+liveness. Timeouts exist because the network can't rely on a single proposer
+being available and must move on if it is not responding.
+
+> How can we address situations where someone arbitrarily adjusts their block
+> production time to gain an advantage?
+
+The impact shown above is negligible in a sufficiently decentralized network.
+
+### The adverse effect of using inconsistent `timeout_commit` in a network
+
+Let's look at the same scenario as before. There are ten geographically
+distributed validators. One of them (Bob) sets `timeout_commit` to 0s. The
+others have it set to 1s (the default value). Now, Bob will be the fastest
+producer because he doesn't wait for additional precommits after creating a
+block. If waiting for precommits (`timeout_commit`) is not incentivized, Bob
+will accrue more rewards compared to the other 9 validators.
+
+This is because Bob has the advantage of broadcasting his proposal early (1
+second earlier than the others). But it also makes it possible for Bob to miss
+a proposal from another validator and prevote `nil` because he starts
+`timeout_propose` earlier. I.e., if Bob's `timeout_commit` is too low compared
+to the other validators, he might miss some proposals and get slashed for
+inactivity.
diff --git a/docs/core/how-to-read-logs.md b/docs/core/how-to-read-logs.md
index ba064952f77..2e54579ac36 100644
--- a/docs/core/how-to-read-logs.md
+++ b/docs/core/how-to-read-logs.md
@@ -24,16 +24,13 @@ I[10-04|13:54:27.368] ABCI Replay Blocks module=consen
I[10-04|13:54:27.368] Completed ABCI Handshake - CometBFT and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
```

-After that, we start a few more things like the event switch, reactors,
-and perform UPNP discover in order to detect the IP address.
+After that, we start a few more things like the event switch and reactors.
```sh
I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch
I[10-04|13:54:27.375] This node is a validator module=consensus
I[10-04|13:54:27.379] Starting Node module=main impl=Node
I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656
-I[10-04|13:54:27.382] Getting UPNP external address module=p2p
-I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout"
I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656)
I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch"
I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor
@@ -66,7 +63,7 @@ I[10-04|13:54:30.392] Started node module=main n

Next follows a standard block creation cycle, where we enter a new round,
propose a block, receive more than 2/3 of prevotes, then precommits and
finally have a chance to commit a block. For details,
-please refer to [Byzantine Consensus Algorithm](https://github.com/cometbft/cometbft/blob/main/spec/consensus/consensus.md).
+please refer to [Byzantine Consensus Algorithm](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/consensus/consensus.md).

```sh
I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus
@@ -117,7 +114,7 @@ little overview what they do.

- `abci-client` As mentioned in [Application Development Guide](../app-dev/abci-cli.md),
  CometBFT acts as an ABCI client with respect to the application and
  maintains 3 connections: mempool, consensus and query. The code used by CometBFT can
-  be found [here](https://github.com/cometbft/cometbft/blob/main/abci/client).
+  be found [here](https://github.com/cometbft/cometbft/blob/v0.38.x/abci/client).
- `blockchain` Provides storage, pool (a group of peers), and reactor for both
  storing and exchanging blocks between peers.
- `consensus` The heart of CometBFT, which is the
  implementation of the consensus algorithm.
  Includes two "submodules": `wal` (write-ahead logging) for ensuring data
  integrity and `replay` to replay blocks and messages on recovery
  from a crash.
- `events` Simple event notification system. The list of events can be
-  found [here](https://github.com/cometbft/cometbft/blob/main/types/events.go).
+  found [here](https://github.com/cometbft/cometbft/blob/v0.38.x/types/events.go).
  You can subscribe to them by calling the `subscribe` RPC method. Refer to
  [RPC docs](./rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whether they
  come from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For more
  details, please check out the
-  [README](https://github.com/cometbft/cometbft/blob/main/p2p/README.md).
+  [README](https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/README.md).
- `rpc` [CometBFT's RPC](./rpc.md).
- `rpc-server` RPC server. For implementation details, please read the
-  [doc.go](https://github.com/cometbft/cometbft/blob/main/rpc/jsonrpc/doc.go).
+  [doc.go](https://github.com/cometbft/cometbft/blob/v0.38.x/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which executes
  blocks against the application.
- `types` A collection of the publicly exposed types and methods to
diff --git a/docs/core/light-client.md b/docs/core/light-client.md
index a698d03ddc7..c64c9163473 100644
--- a/docs/core/light-client.md
+++ b/docs/core/light-client.md
@@ -16,7 +16,7 @@ package](https://pkg.go.dev/github.com/cometbft/cometbft/light?tab=doc).
The objective of the light client protocol is to get a commit for a recent
block hash where the commit includes a majority of signatures from the last
known validator set. From there, all the application state is verifiable with
-[merkle proofs](https://github.com/cometbft/cometbft/blob/main/spec/core/encoding.md#iavl-tree).
+[merkle proofs](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/encoding.md#iavl-tree).

## Properties

diff --git a/docs/core/mempool.md b/docs/core/mempool.md
index 8dd96878196..f86083ee04d 100644
--- a/docs/core/mempool.md
+++ b/docs/core/mempool.md
@@ -4,7 +4,41 @@ order: 12

# Mempool

-## Transaction ordering
+A mempool (a contraction of memory and pool) is a node’s data structure for
+storing information on uncommitted transactions. It acts as a sort of waiting
+room for transactions that have not yet been committed.
+
+CometBFT currently supports two types of mempools: `flood` and `nop`.
+
+## 1. Flood
+
+The `flood` mempool stores transactions in a concurrent linked list. When a new
+transaction is received, it first checks that there's space for it (the `size` and
+`max_txs_bytes` config options) and that it's not too big (the `max_tx_bytes` config
+option). Then, it checks whether this transaction has been seen before, using an
+LRU cache (`cache_size` regulates the cache's size). If all checks pass and
+the transaction is not in the cache (meaning it's new), the ABCI
+[`CheckTxAsync`][1] method is called. The ABCI application validates the
+transaction using its own rules.
+
+If the transaction is deemed valid by the ABCI application, it's added to the linked list.
+
+The mempool's name (`flood`) comes from the dissemination mechanism. When a new
+transaction is added to the linked list, the mempool sends it to all connected
+peers. Peers themselves gossip this transaction to their peers, and so on. One
+can say that each transaction "floods" the network, hence the name `flood`.
+
+Note there are experimental config options
+`experimental_max_gossip_connections_to_persistent_peers` and
+`experimental_max_gossip_connections_to_non_persistent_peers` to limit the
+number of peers a transaction is broadcasted to. Also, you can turn off
+broadcasting with the `broadcast` config option.
+
+After each committed block, CometBFT rechecks all uncommitted transactions
+(this can be disabled with the `recheck` config option) by calling the ABCI
+`CheckTxAsync` method on each of them.
+
+### Transaction ordering

Currently, there's no ordering of transactions other than the order they've
arrived (via RPC or from other nodes).
@@ -46,3 +80,24 @@ order/nonce/sequence number, the application can reject transactions that are
out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then
accept `tx1`. The sender can then retry sending `tx3`, which should probably be
rejected until the node has seen `tx2`.
+
+## 2. Nop
+
+The `nop` (short for no operation) mempool is used when the ABCI application developer wants to
+build their own mempool. When `type = "nop"`, transactions are not stored anywhere
+and are not gossiped to other peers using the P2P network.
+
+Submitting a transaction via the existing RPC methods (`BroadcastTxSync`,
+`BroadcastTxAsync`, and `BroadcastTxCommit`) will always result in an error.
+
+Because there's no way for the consensus to know if transactions are available
+to be committed, the node will always create blocks, which can sometimes be
+empty. Using `consensus.create_empty_blocks=false` is prohibited in such
+cases.
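+
+What an app-specific submission interface looks like is entirely up to the
+application. As a purely illustrative, hedged sketch (none of these names come
+from CometBFT), an application could expose a small HTTP endpoint and buffer
+incoming transactions in its own pool until it proposes them:
+
+```go
+package main
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+// txPool is a hypothetical application-side buffer that stands in for the
+// storage and dissemination duties the nop mempool hands over to the app.
+type txPool struct {
+	mtx sync.Mutex
+	txs [][]byte
+}
+
+func (p *txPool) add(tx []byte) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.txs = append(p.txs, tx)
+}
+
+func main() {
+	pool := &txPool{}
+	// Users submit transactions here instead of via CometBFT's BroadcastTx*
+	// RPC methods, which always return an error under the nop mempool.
+	http.HandleFunc("/submit_tx", func(w http.ResponseWriter, r *http.Request) {
+		tx, err := io.ReadAll(r.Body)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		pool.add(tx)
+		w.WriteHeader(http.StatusAccepted)
+	})
+	_ = http.ListenAndServe(":8080", nil) // illustrative address
+}
+```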
+
+The ABCI application becomes responsible for storing, disseminating, and
+proposing transactions using [`PrepareProposal`][2]. The concrete design is up
+to the ABCI application developers.
+
+[1]: ../../spec/abci/abci++_methods.md#checktx
+[2]: ../../spec/abci/abci++_methods.md#prepareproposal
\ No newline at end of file
diff --git a/docs/core/metrics.md b/docs/core/metrics.md
index 71cc8d2093b..5e97318f358 100644
--- a/docs/core/metrics.md
+++ b/docs/core/metrics.md
@@ -61,7 +61,7 @@ The following metrics are available:
| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes |
| mempool\_failed\_txs | Counter | | Number of failed transactions |
| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool |
-| state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms |
+| state\_block\_processing\_time | Histogram | | Time spent processing FinalizeBlock in ms |
| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start |
| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start |
| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
diff --git a/docs/core/running-in-production.md b/docs/core/running-in-production.md
index beaee33056e..98406dadf13 100644
--- a/docs/core/running-in-production.md
+++ b/docs/core/running-in-production.md
@@ -10,22 +10,22 @@ By default, CometBFT uses the `syndtr/goleveldb` package for its in-process
key-value database. If you want maximal performance, it may be best to install
the real C-implementation of LevelDB and compile CometBFT to use that using
`make build COMETBFT_BUILD_OPTIONS=cleveldb`. See the [install
-instructions](../introduction/install.md) for details.
+instructions](../guides/install.md) for details.

CometBFT keeps multiple distinct databases in the `$CMTHOME/data`:

- `blockstore.db`: Keeps the entire blockchain - stores blocks,
-  block commits, and block meta data, each indexed by height. Used to sync new
+  block commits, and block metadata, each indexed by height. Used to sync new
  peers.
- `evidence.db`: Stores all verified evidence of misbehavior.
-- `state.db`: Stores the current blockchain state (ie. height, validators,
+- `state.db`: Stores the current blockchain state (i.e. height, validators,
  consensus params). Only grows if consensus params or validators change. Also
  used to temporarily store intermediate results during block processing.
-- `tx_index.db`: Indexes txs (and their results) by tx hash and by DeliverTx result events.
+- `tx_index.db`: Indexes transactions by tx hash and height. The tx results are indexed if they are added to the `FinalizeBlock` response in the application.

-By default, CometBFT will only index txs by their hash and height, not by their DeliverTx
-result events. See [indexing transactions](../app-dev/indexing-transactions.md) for
-details.
+By default, CometBFT will only index transactions by their hash and height. If
+you want the result events to be indexed, see [indexing
+transactions](../app-dev/indexing-transactions.md) for details.

Applications can expose block pruning strategies to the node operator. Please read the documentation of your application
to find out more details.
@@ -62,12 +62,12 @@ If your `consensus.wal` is corrupted, see [below](#wal-corruption).
### Mempool WAL

-The `mempool.wal` logs all incoming txs before running CheckTx, but is
+The `mempool.wal` logs all incoming transactions before running CheckTx, but is
otherwise not used in any programmatic way. It's just a kind of manual
safeguard. Note the mempool provides no durability guarantees - a tx sent to one or many nodes
may never make it into the blockchain if those nodes crash before being able to
-propose it. Clients must monitor their txs by subscribing over websockets,
-polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be
+propose it. Clients must monitor their transactions by subscribing over websockets,
+polling for them, or using `/broadcast_tx_commit`. In the worst case, transactions can be
resent from the mempool WAL manually.

For the above reasons, the `mempool.wal` is disabled by default. To enable, set
@@ -123,7 +123,7 @@ ever be exposed publicly.**

#### Endpoints Returning Multiple Entries

Endpoints returning multiple entries are limited by default to return 30
-elements (100 max). See the [RPC Documentation](https://docs.cometbft.com/main/rpc/)
+elements (100 max). See the [RPC Documentation](https://docs.cometbft.com/v0.38/rpc/)
for more information.

## Debugging CometBFT
@@ -370,7 +370,7 @@ proposing the next block).

By default, CometBFT checks whether a peer's address is routable before saving
it to the address book. The address is considered routable if the IP
-is [valid and within allowed ranges](https://github.com/cometbft/cometbft/blob/main/p2p/netaddress.go#L258).
+is [valid and within allowed ranges](https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/netaddress.go#L258).

This may not be the case for private or local networks, where your IP range is usually
strictly limited and private. In that case, you need to set `addr_book_strict`
diff --git a/docs/core/state-sync.md b/docs/core/state-sync.md
index 0f477302e29..7f5c5433d2e 100644
--- a/docs/core/state-sync.md
+++ b/docs/core/state-sync.md
@@ -30,7 +30,7 @@ The next information you will need to acquire it through publicly exposed RPC's
- `trust_period`: Trust period is the period in which headers can be verified.
  > :warning: This value should be significantly smaller than the unbonding period.

-If you are relying on publicly exposed RPC's to get the need information, you can use `curl`.
+If you are relying on publicly exposed RPC's to get the needed information, you can use `curl` and [`jq`][jq].

Example:

@@ -46,3 +46,5 @@ The response will be:
  "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D"
}
```
+
+[jq]: https://jqlang.github.io/jq/
diff --git a/docs/core/subscription.md b/docs/core/subscription.md
index 96d455e54eb..7b7f1512e47 100644
--- a/docs/core/subscription.md
+++ b/docs/core/subscription.md
@@ -14,9 +14,11 @@ To connect to a node via websocket from the CLI, you can use a tool such as
[wscat](https://github.com/websockets/wscat) and run:

```sh
-wscat ws://127.0.0.1:26657/websocket
+wscat -c ws://127.0.0.1:26657/websocket
```

+NOTE: If your node's RPC endpoint is TLS-enabled, use the scheme `wss` instead of `ws`.
+
You can subscribe to any of the events above by calling the `subscribe` RPC
method via Websocket along with a valid query.

@@ -31,19 +33,38 @@ method via Websocket along with a valid query.
}
```

-Check out [API docs](https://docs.cometbft.com/main/rpc/) for
+Check out [API docs](https://docs.cometbft.com/v0.38/rpc/) for
-You can also use tags, given you had included them into DeliverTx +You can also use tags, given you had included them into FinalizeBlock response, to query transaction results. See [Indexing transactions](../app-dev/indexing-transactions.md) for details. +## Query parameter and event type restrictions + +While CometBFT imposes no restrictions on the application with regards to the type of +the event output, there are several considerations that need to be taken into account +when querying events with numeric values. + +- Queries convert all numeric event values to `big.Float` , provided by `math/big`. Integers +are converted into a float with a precision equal to the number of bits needed +to represent this integer. This is done to avoid precision loss for big integers when they +are converted with the default precision (`64`). +- When comparing two values, if either one of them is a float, the other one will be represented +as a big float. Integers are again parsed as big floats with a precision equal to the number +of bits required to represent them. +- As with all floating point comparisons, comparing floats with decimal values can lead to imprecise +results. +- Queries cannot include negative numbers + +Prior to version `v0.38.x`, floats were not supported as query parameters. + ## ValidatorSetUpdates When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. The list is the same CometBFT receives from ABCI application (see [EndBlock -section](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_methods.md#endblock) in +section](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci++_methods.md#endblock) in the ABCI spec). Response: diff --git a/docs/core/using-cometbft.md b/docs/core/using-cometbft.md index 258f20fc2c1..cafa8d34319 100644 --- a/docs/core/using-cometbft.md +++ b/docs/core/using-cometbft.md @@ -39,7 +39,7 @@ cometbft testnet --help The `genesis.json` file in `$CMTHOME/config/` defines the initial CometBFT state upon genesis of the blockchain ([see -definition](https://github.com/cometbft/cometbft/blob/main/types/genesis.go)). +definition](https://github.com/cometbft/cometbft/blob/v0.38.x/types/genesis.go)). #### Fields @@ -49,7 +49,7 @@ definition](https://github.com/cometbft/cometbft/blob/main/types/genesis.go)). chain IDs, you will have a bad time. The ChainID must be less than 50 symbols. - `initial_height`: Height at which CometBFT should begin at. If a blockchain is conducting a network upgrade, starting from the stopped height brings uniqueness to previous heights. -- `consensus_params` ([see spec](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#consensusparams)) +- `consensus_params` ([see spec](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/data_structures.md#consensusparams)) - `block` - `max_bytes`: Max block size, in bytes. - `max_gas`: Max gas per block. @@ -59,7 +59,7 @@ definition](https://github.com/cometbft/cometbft/blob/main/types/genesis.go)). - `max_age_duration`: Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake - attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). + attacks](https://vitalik.ca/general/2017/12/31/pos_faq.html#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). 
- `max_bytes`: This sets the maximum size in bytes of evidence that can be committed
      in a single block and should fall comfortably under the max block bytes.
  - `validator`
@@ -71,7 +71,7 @@ definition](https://github.com/cometbft/cometbft/blob/main/types/genesis.go)).
    application will initialize the validator set upon `InitChain`.
  - `pub_key`: The first element specifies the key type,
    using the declared `PubKeyName` for the adopted
-    [key type](https://github.com/cometbft/cometbft/blob/main/crypto/ed25519/ed25519.go#L36).
+    [key type](https://github.com/cometbft/cometbft/blob/v0.38.x/crypto/ed25519/ed25519.go#L36).
    The second element is the pubkey bytes.
  - `power`: The validator's voting power.
  - `name`: Name of the validator (optional).
@@ -130,7 +130,7 @@ cometbft node
```

By default, CometBFT will try to connect to an ABCI application on
-`127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in
+`tcp://127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in
another window. If you don't, kill CometBFT and run an in-process version of
the `kvstore` app:

@@ -139,8 +139,8 @@ cometbft node --proxy_app=kvstore
```

After a few seconds, you should see blocks start streaming in. Note that blocks
-are produced regularly, even if there are no transactions. See _No Empty
-Blocks_, below, to modify this setting.
+are produced regularly, even if there are no transactions. See [No Empty
+Blocks](#no-empty-blocks), below, to modify this setting.

CometBFT supports in-process versions of the `counter`, `kvstore`, and `noop`
apps that ship as examples with `abci-cli`. It's easy to compile your app
@@ -180,7 +180,7 @@ endpoints.

Some take no arguments (like `/status`), while others specify the argument name
and use `_` as a placeholder.

-> TIP: Find the RPC Documentation [here](https://docs.cometbft.com/main/rpc/)
+> TIP: Find the RPC Documentation [here](https://docs.cometbft.com/v0.38/rpc/)

### Formatting

@@ -565,7 +565,7 @@ library will deny making connections to peers with the same IP address.

### Upgrading

See the
-[UPGRADING.md](https://github.com/cometbft/cometbft/blob/main/UPGRADING.md)
+[UPGRADING.md](https://github.com/cometbft/cometbft/blob/v0.38.x/UPGRADING.md)
guide. You may need to reset your chain between major breaking releases,
though we expect CometBFT to have fewer breaking releases in the future
(especially after the 1.0 release).
diff --git a/docs/core/validators.md b/docs/core/validators.md
index 34f99290856..86c4626ee6c 100644
--- a/docs/core/validators.md
+++ b/docs/core/validators.md
@@ -10,14 +10,13 @@ _votes_ which contain cryptographic signatures signed by each
validator's private key.

Some Proof-of-Stake consensus algorithms aim to create a "completely"
-decentralized system where all stakeholders (even those who are not
-always available online) participate in the committing of blocks.
-CometBFT has a different approach to block creation. Validators are
-expected to be online, and the set of validators is permissioned/curated
-by some external process. Proof-of-stake is not required, but can be
-implemented on top of CometBFT consensus. That is, validators may be
-required to post collateral on-chain, off-chain, or may not be required
-to post any collateral at all.
+decentralized system where all stakeholders (even those who are not always
+available online) participate in the committing of blocks. CometBFT has a
+different approach to block creation. Validators are expected to be online, and
+the set of validators is permissioned/curated by the ABCI application.
+Proof-of-stake is not required, but can be implemented on top of CometBFT
+consensus. That is, validators may be required to post collateral on-chain,
+off-chain, or may not be required to post any collateral at all.

Validators have a cryptographic key-pair and an associated amount of
"voting power". Voting power need not be the same.
@@ -27,7 +26,7 @@ Validators have a cryptographic key-pair and an associated amount of

There are two ways to become a validator.

1. They can be pre-established in the [genesis state](./using-cometbft.md#genesis)
-2. The ABCI app responds to the EndBlock message with changes to the
+2. The ABCI app responds to the FinalizeBlock message with changes to the
   existing validator set.

## Setting up a Validator

@@ -100,16 +99,3 @@ More Information can be found at these links:

Protecting a validator's consensus key is the most important factor to take into account when designing your setup. The key that a validator is given upon creation of the node is called a consensus key; it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms); it is used extensively in production. You are not limited to using this tool; there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), though there is no single recommended HSM. Currently CometBFT uses [Ed25519](https://ed25519.cr.yp.to/) keys, which are widely supported across the security sector and HSMs.
-
-## Committing a Block
-
-> **+2/3 is short for "more than 2/3"**
-
-A block is committed when +2/3 of the validator set sign
-[precommit votes](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#vote)
-for that block at the same `round`.
-The +2/3 set of precommit votes is called a
-[commit](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#commit).
-While any +2/3 set of precommits for the same block at the same height&round can serve as
-validation, the canonical commit is included in the next block (see
-[LastCommit](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#block)).
diff --git a/docs/explanation/core/metrics.md b/docs/explanation/core/metrics.md
new file mode 100644
index 00000000000..90bc8a867d3
--- /dev/null
+++ b/docs/explanation/core/metrics.md
@@ -0,0 +1,92 @@
+---
+order: 5
+---
+
+# Metrics
+
+CometBFT can report and serve Prometheus metrics, which in turn can be
+consumed by Prometheus collector(s).
+
+This functionality is disabled by default.
+
+To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your
+config file. Metrics will be served under `/metrics` on port 26660 by default.
+The listen address can be changed in the config file (see
+`instrumentation.prometheus_listen_addr`).
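+
+For operators who build the node configuration programmatically rather than by
+editing `config.toml`, here is a minimal, hedged Go sketch using CometBFT's
+`config` package (assuming the `v0.38.x` field names); it only illustrates the
+two settings discussed above:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	cfg "github.com/cometbft/cometbft/config"
+)
+
+func main() {
+	// Start from CometBFT's defaults, then turn on Prometheus reporting.
+	c := cfg.DefaultConfig()
+	c.Instrumentation.Prometheus = true
+	c.Instrumentation.PrometheusListenAddr = ":26660" // the default port
+
+	fmt.Println("metrics will be served at", c.Instrumentation.PrometheusListenAddr+"/metrics")
+}
+```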
+ +## List of available metrics + +The following metrics are available: + +| **Name** | **Type** | **Tags** | **Description** | +| ------------------------------------------------------- | --------- | ------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | +| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods | +| blocksync\_syncing | Gauge | | Either 0 (not block syncing) or 1 (syncing) | +| consensus\_height | Gauge | | Height of the chain | +| consensus\_validators | Gauge | | Number of validators | +| consensus\_validators\_power | Gauge | validator\_address | Total voting power of all validators | +| consensus\_validator\_power | Gauge | validator\_address | Voting power of the node if in the validator set | +| consensus\_validator\_last\_signed\_height | Gauge | validator\_address | Last height the node signed a block, if the node is a validator | +| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus\_missing\_validators | Gauge | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | | Number of rounds | +| consensus\_num\_txs | Gauge | | Number of transactions | +| consensus\_total\_txs | Gauge | | Total number of transactions committed | +| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | Gauge | | /status sync\_info number | +| consensus\_block\_size\_bytes | Gauge | | Block size in bytes | +| consensus\_step\_duration\_seconds | Histogram | step | Histogram of durations for each step in the consensus protocol | +| consensus\_round\_duration\_seconds | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | +| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node | +| consensus\_quorum\_prevote\_delay | Gauge | proposer\_address | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | +| consensus\_full\_prevote\_delay | Gauge | proposer\_address | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| consensus\_vote\_extension\_receive\_count | Counter | status | Number of vote extensions received | +| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start | +| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start | +| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than 
this node is currently in. |
+| consensus\_duplicate\_vote | Counter | | Number of times we received a duplicate vote. |
+| consensus\_duplicate\_block\_part | Counter | | Number of times we received a duplicate block part. |
+| consensus\_proposal\_timestamp\_difference | Histogram | is\_timely | Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message. |
+| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type |
+| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type |
+| p2p\_peers | Gauge | | Number of peers the node is connected to |
+| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer |
+| p2p\_recv\_rate\_limiter\_delay | Counter | peer\_id | Time spent sleeping by the receive rate limiter, in seconds |
+| p2p\_send\_rate\_limiter\_delay | Counter | peer\_id | Time spent sleeping by the send rate limiter, in seconds |
+| mempool\_size | Gauge | | Number of uncommitted transactions in the mempool |
+| mempool\_size\_bytes | Gauge | | Total size of the mempool in bytes |
+| mempool\_tx\_size\_bytes | Histogram | | Histogram of transaction sizes in bytes |
+| mempool\_evicted\_txs | Counter | | Number of transactions that made it into the mempool and were later evicted for being invalid |
+| mempool\_failed\_txs | Counter | | Number of transactions that failed to make it into the mempool for being invalid |
+| mempool\_rejected\_txs | Counter | | Number of transactions that failed to make it into the mempool due to resource limits |
+| mempool\_recheck\_times | Counter | | Number of times transactions are rechecked in the mempool |
+| mempool\_already\_received\_txs | Counter | | Number of times transactions were received more than once |
+| mempool\_active\_outbound\_connections | Gauge | | Number of connections being actively used for gossiping transactions (experimental) |
+| mempool\_recheck\_duration\_seconds | Gauge | | Cumulative time spent rechecking transactions |
+| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start |
+| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start |
+| state\_pruning\_service\_block\_retain\_height | Gauge | | Accepted block retain height set by the data companion |
+| state\_pruning\_service\_block\_results\_retain\_height | Gauge | | Accepted block results retain height set by the data companion |
+| state\_pruning\_service\_tx\_indexer\_retain\_height | Gauge | | Accepted transactions indices retain height set by the data companion |
+| state\_pruning\_service\_block\_indexer\_retain\_height | Gauge | | Accepted blocks indices retain height set by the data companion |
+| state\_application\_block\_retain\_height | Gauge | | Accepted block retain height set by the application |
+| state\_block\_store\_base\_height | Gauge | | First height at which a block is available |
+| state\_abciresults\_base\_height | Gauge | | First height at which ABCI results are available |
+| state\_tx\_indexer\_base\_height | Gauge | | First height at which tx indices are available |
+| state\_block\_indexer\_base\_height | Gauge | | First height at which block indices are available |
+| state\_store\_access\_duration\_seconds | Histogram | method | 
Duration of accesses to the state store labeled by which method was called on the store |
+| state\_fire\_block\_events\_delay\_seconds | Gauge | | Duration of event firing related to a new block |
+| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
+
+## Useful queries
+
+Percentage of missing + byzantine validators:
+
+```md
+((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
+```
diff --git a/docs/guides/go-built-in.md b/docs/guides/go-built-in.md
index c0e780bf95c..3c9120c205c 100644
--- a/docs/guides/go-built-in.md
+++ b/docs/guides/go-built-in.md
@@ -40,14 +40,13 @@ guarantees as two processes would be communicating via established binary protoc
CometBFT will not have access to the application's state.
If that is the way you wish to proceed, use the [Creating an application in Go](./go.md) guide instead of this one.

-
## 1.1 Installing Go

Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)):

```bash
$ go version
-go version go1.20.1 darwin/amd64
+go version go1.22.7 darwin/amd64
```

## 1.2 Creating a new Go project

@@ -82,30 +81,38 @@ Hello, CometBFT

We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management, so let's start by
including a dependency on the latest version of
-CometBFT, `v0.37.0` in this example.
+CometBFT, `v0.38.0` in this example.

```bash
go mod init kvstore
-go get github.com/cometbft/cometbft@v0.37.0
+go get github.com/cometbft/cometbft@v0.38.0
```

After running the above commands you will see two generated files, `go.mod` and `go.sum`.
The go.mod file should look similar to:

```go
-module github.com/me/example
+module kvstore

-go 1.20
+go 1.22

require (
-	github.com/cometbft/cometbft v0.37.0
+	github.com/cometbft/cometbft v0.38.0
)
```

+XXX: CometBFT `v0.38.0` uses a slightly outdated `gogoproto` library, which
+may fail to compile with newer Go versions. To avoid any compilation errors,
+upgrade `gogoproto` manually:
+
+```bash
+go get github.com/cosmos/gogoproto@v1.4.11
+```
+
As you write the kvstore application, you can rebuild the binary by
pulling any new dependencies and recompiling it.

-```sh
+```bash
go get
go build
```

@@ -115,7 +122,7 @@ go build
CometBFT communicates with the application through the Application
BlockChain Interface (ABCI). The messages exchanged through the interface are
defined in the ABCI [protobuf
-file](https://github.com/cometbft/cometbft/blob/v0.37.x/proto/tendermint/abci/types.proto).
+file](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto).
We begin by creating the basic scaffolding for an ABCI application by
creating a new type, `KVStoreApplication`, which implements the
@@ -127,7 +134,8 @@ Create a file called `app.go` with the following contents:
package main

import (
-	abcitypes "github.com/cometbft/cometbft/abci/types"
+	"context"
+	abcitypes "github.com/cometbft/cometbft/abci/types"
)

type KVStoreApplication struct{}
@@ -135,63 +143,63 @@ type KVStoreApplication struct{}
var _ abcitypes.Application = (*KVStoreApplication)(nil)

func NewKVStoreApplication() *KVStoreApplication {
-	return &KVStoreApplication{}
+	return &KVStoreApplication{}
}

-func (app *KVStoreApplication) Info(info abcitypes.RequestInfo) abcitypes.ResponseInfo {
-	return abcitypes.ResponseInfo{}
+func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) {
+	return &abcitypes.ResponseInfo{}, nil
}

-func (app *KVStoreApplication) Query(query abcitypes.RequestQuery) abcitypes.ResponseQuery {
-	return abcitypes.ResponseQuery{}
+func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) {
+	return &abcitypes.ResponseQuery{}, nil
}

-func (app *KVStoreApplication) CheckTx(tx abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
-	return abcitypes.ResponseCheckTx{}
+func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) {
+	return &abcitypes.ResponseCheckTx{}, nil
}

-func (app *KVStoreApplication) InitChain(chain abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
-	return abcitypes.ResponseInitChain{}
+func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) {
+	return &abcitypes.ResponseInitChain{}, nil
}

-func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
-	return abcitypes.ResponsePrepareProposal{}
+func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
+	return &abcitypes.ResponsePrepareProposal{}, nil
}

-func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal {
-	return abcitypes.ResponseProcessProposal{}
+func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) {
+	return &abcitypes.ResponseProcessProposal{}, nil
}

-func (app *KVStoreApplication) BeginBlock(block abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
-	return abcitypes.ResponseBeginBlock{}
+func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
+	return &abcitypes.ResponseFinalizeBlock{}, nil
}

-func (app *KVStoreApplication) DeliverTx(tx abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
-	return abcitypes.ResponseDeliverTx{}
+func (app *KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
+	return &abcitypes.ResponseCommit{}, nil
}

-func (app *KVStoreApplication) EndBlock(block abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
-	return abcitypes.ResponseEndBlock{}
+func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.RequestListSnapshots) (*abcitypes.ResponseListSnapshots, error) {
return &abcitypes.ResponseListSnapshots{}, nil } -func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { - return abcitypes.ResponseCommit{} +func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.RequestOfferSnapshot) (*abcitypes.ResponseOfferSnapshot, error) { + return &abcitypes.ResponseOfferSnapshot{}, nil } -func (app *KVStoreApplication) ListSnapshots(snapshots abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots { - return abcitypes.ResponseListSnapshots{} +func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.RequestLoadSnapshotChunk) (*abcitypes.ResponseLoadSnapshotChunk, error) { + return &abcitypes.ResponseLoadSnapshotChunk{}, nil } -func (app *KVStoreApplication) OfferSnapshot(snapshot abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot { - return abcitypes.ResponseOfferSnapshot{} +func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.RequestApplySnapshotChunk) (*abcitypes.ResponseApplySnapshotChunk, error) { + return &abcitypes.ResponseApplySnapshotChunk{Result: abcitypes.ResponseApplySnapshotChunk_ACCEPT}, nil } -func (app *KVStoreApplication) LoadSnapshotChunk(chunk abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk { - return abcitypes.ResponseLoadSnapshotChunk{} +func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) { + return &abcitypes.ResponseExtendVote{}, nil } -func (app *KVStoreApplication) ApplySnapshotChunk(chunk abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk { - return abcitypes.ResponseApplySnapshotChunk{} +func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) { + return &abcitypes.ResponseVerifyVoteExtension{}, nil } ``` @@ -199,7 +207,7 @@ The types used here are defined in the CometBFT library and were added as a depe to the project when you ran `go get`. If your IDE is not recognizing the types, go ahead and run the command again. ```bash -go get github.com/cometbft/cometbft@v0.37.0 +go get github.com/cometbft/cometbft@v0.38.0 ``` Now go back to the `main.go` and modify the `main` function so it matches the following, @@ -218,7 +226,6 @@ not do anything. So let's revisit the code adding the logic needed to implement our minimal key/value store and to start it along with the CometBFT Service. 
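+
+For reference, the `main` function at this stage can be as small as the
+following sketch; the exact snippet sits outside this diff's hunks, so treat
+this as an illustration rather than the guide's verbatim code:
+
+```go
+package main
+
+func main() {
+	// Construct the application so the project compiles; it gets wired
+	// into a CometBFT node in section 1.4.
+	app := NewKVStoreApplication()
+	_ = app
+}
+```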
- ### 1.3.1 Add a persistent data store Our application will need to write its state out to persistent storage so that it @@ -235,14 +242,14 @@ Next, let's update the application and its constructor to receive a handle to th ```go type KVStoreApplication struct { - db *badger.DB - onGoingBlock *badger.Txn + db *badger.DB + onGoingBlock *badger.Txn } var _ abcitypes.Application = (*KVStoreApplication)(nil) func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{db: db} + return &KVStoreApplication{db: db} } ``` @@ -253,15 +260,15 @@ Next, update the `import` stanza at the top to include the Badger library: ```go import( - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` Finally, update the `main.go` file to invoke the updated constructor: ```go - _ = NewKVStoreApplication(nil) + _ = NewKVStoreApplication(nil) ``` ### 1.3.2 CheckTx @@ -277,22 +284,22 @@ For that, let's add the following helper method to app.go: ```go func (app *KVStoreApplication) isValid(tx []byte) uint32 { - // check format - parts := bytes.Split(tx, []byte("=")) - if len(parts) != 2 { - return 1 - } + // check format + parts := bytes.Split(tx, []byte("=")) + if len(parts) != 2 { + return 1 + } - return 0 + return 0 } ``` Now you can rewrite the `CheckTx` method to use the helper function: ```go -func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx { - code := app.isValid(req.Tx) - return abcitypes.ResponseCheckTx{Code: code} +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { + code := app.isValid(check.Tx) + return &abcitypes.ResponseCheckTx{Code: code}, nil } ``` @@ -311,83 +318,75 @@ information on why the transaction was rejected. Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block. - -Finally, make sure to add the bytes package to the `import` stanza at the top of `app.go`: +Finally, make sure to add the `bytes` package to the `import` stanza at the top of `app.go`: ```go import( - "bytes" + "bytes" - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` - -### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 FinalizeBlock When the CometBFT consensus engine has decided on the block, the block is transferred to the -application over three ABCI method calls: `BeginBlock`, `DeliverTx`, and `EndBlock`. +application via `FinalizeBlock`. +`FinalizeBlock` is an ABCI method introduced in CometBFT `v0.38.0`. This replaces the functionality provided previously (pre-`v0.38.0`) by the combination of ABCI methods `BeginBlock`, `DeliverTx`, and `EndBlock`. `FinalizeBlock`'s parameters are an aggregation of those in `BeginBlock`, `DeliverTx`, and `EndBlock`. -- `BeginBlock` is called once to indicate to the application that it is about to -receive a block. -- `DeliverTx` is called repeatedly, once for each application transaction that was included in the block. -- `EndBlock` is called once to indicate to the application that no more transactions -will be delivered to the application within this block. 
+This method is responsible for executing the block and returning a response to the consensus engine.
+Providing a single `FinalizeBlock` method to signal the finalization of a block simplifies the ABCI interface and increases flexibility in the execution pipeline.
-Note that, to implement these calls in our application we're going to make use of Badger's
-transaction mechanism. We will always refer to these as Badger transactions, not to
-confuse them with the transactions included in the blocks delivered by CometBFT,
-the _application transactions_.
+The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `ResponseFinalizeBlock` object, which contains any necessary information about the executed block.
-First, let's create a new Badger transaction during `BeginBlock`. All application transactions in the
-current block will be executed within this Badger transaction.
-Then, return informing CometBFT that the application is ready to receive application transactions:
+**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage, i.e., in the `Commit` phase.
-```go
-func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
-	app.onGoingBlock = app.db.NewTransaction(true)
-	return abcitypes.ResponseBeginBlock{}
-}
-```
-
-Next, let's modify `DeliverTx` to add the `key` and `value` to the database transaction every time our application
-receives a new application transaction through `RequestDeliverTx`.
-
-```go
-func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
-	if code := app.isValid(req.Tx); code != 0 {
-		return abcitypes.ResponseDeliverTx{Code: code}
-	}
+Note that, to implement these calls in our application, we're going to make use of Badger's transaction mechanism. We will always refer to these as Badger transactions, not to confuse them with the transactions included in the blocks delivered by CometBFT, the _application transactions_.
-	parts := bytes.SplitN(req.Tx, []byte("="), 2)
-	key, value := parts[0], parts[1]
+First, let's create a new Badger transaction during `FinalizeBlock`. All application transactions in the current block will be executed within this Badger transaction.
+Next, let's modify `FinalizeBlock` to add the `key` and `value` to the Badger transaction every time our application processes a new application transaction from the list received through `RequestFinalizeBlock`.
-	if err := app.onGoingBlock.Set(key, value); err != nil {
-		log.Panicf("Error writing to database, unable to execute tx: %v", err)
-	}
+Note that we check the validity of the transaction _again_ during `FinalizeBlock`.
-	return abcitypes.ResponseDeliverTx{Code: 0}
+```go
+func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
+	var txs = make([]*abcitypes.ExecTxResult, len(req.Txs))
+
+	app.onGoingBlock = app.db.NewTransaction(true)
+	for i, tx := range req.Txs {
+		if code := app.isValid(tx); code != 0 {
+			log.Printf("Error: invalid transaction index %v", i)
+			txs[i] = &abcitypes.ExecTxResult{Code: code}
+		} else {
+			parts := bytes.SplitN(tx, []byte("="), 2)
+			key, value := parts[0], parts[1]
+			log.Printf("Adding key %s with value %s", key, value)
+
+			if err := app.onGoingBlock.Set(key, value); err != nil {
+				log.Panicf("Error writing to database, unable to execute tx: %v", err)
+			}
+
+			log.Printf("Successfully added key %s with value %s", key, value)
+
+			txs[i] = &abcitypes.ExecTxResult{}
+		}
+	}
+
+	return &abcitypes.ResponseFinalizeBlock{
+		TxResults: txs,
+	}, nil
 }
 ```

-Note that we check the validity of the transaction _again_ during `DeliverTx`.
-Transactions are not guaranteed to be valid when they are delivered to an
-application, even if they were valid when they were proposed.
-This can happen if the application state is used to determine transaction
-validity. Application state may have changed between the initial execution of `CheckTx`
-and the transaction delivery in `DeliverTx` in a way that rendered the transaction
-no longer valid.
+Transactions are not guaranteed to be valid when they are delivered to an application, even if they were valid when they were proposed.
+
+This can happen if the application state is used to determine transaction validity.
+The application state may have changed between the initial execution of `CheckTx` and the transaction delivery in `FinalizeBlock` in a way that rendered the transaction no longer valid.
-`EndBlock` is called to inform the application that the full block has been delivered
-and give the application a chance to perform any other computation needed, before the
-effects of the transactions become permanent.
+**Note** that `FinalizeBlock` cannot yet commit the Badger transaction we were building during the block execution.
-Note that `EndBlock` **cannot** yet commit the Badger transaction we were building
-in during `DeliverTx`.
-Since other methods, such as `Query`, rely on a consistent view of the application's
-state, the application should only update its state by committing the Badger transactions
-when the full block has been delivered and the `Commit` method is invoked.
+Other methods, such as `Query`, rely on a consistent view of the application's state, so the application should only update its state by committing the Badger transactions when the full block has been delivered and the `Commit` method is invoked.
+
+The `Commit` method tells the application to make permanent the effects of the application transactions.
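+
+To make the Badger side of this split concrete, here is a standalone sketch of
+the write-transaction lifecycle (assuming `badger/v3`); the application simply
+spreads these steps across `FinalizeBlock` (`NewTransaction` and `Set`) and
+`Commit` (`Commit`):
+
+```go
+// Standalone sketch, not part of the application code.
+txn := db.NewTransaction(true) // true = read-write transaction
+defer txn.Discard()            // a no-op once Commit has succeeded
+
+if err := txn.Set([]byte("cometbft"), []byte("rocks")); err != nil {
+	log.Panicf("Error writing to database: %v", err)
+}
+if err := txn.Commit(); err != nil {
+	log.Panicf("Error committing to database: %v", err)
+}
+```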
@@ -395,11 +394,8 @@ Let's update the method to terminate the pending Badger transaction and persist the resulting state: ```go -func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { - if err := app.onGoingBlock.Commit(); err != nil { - log.Panicf("Error writing to database, unable to commit block: %v", err) - } - return abcitypes.ResponseCommit{Data: []byte{}} +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { + return &abcitypes.ResponseCommit{}, app.onGoingBlock.Commit() } ``` @@ -407,18 +403,20 @@ Finally, make sure to add the log library to the `import` stanza as well: ```go import ( - "bytes" - "log" + "bytes" + "log" - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` You may have noticed that the application we are writing will crash if it receives -an unexpected error from the Badger database during the `DeliverTx` or `Commit` methods. +an unexpected error from the Badger database during the `FinalizeBlock` or `Commit` methods. This is not an accident. If the application received an error from the database, there is no deterministic way for it to make progress so the only safe option is to terminate. +Once the application is restarted, the transactions in the block that failed execution will +be re-executed and should succeed if the Badger error was transient. ### 1.3.4 Query @@ -426,29 +424,29 @@ When a client tries to read some information from the `kvstore`, the request wil handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`: ```go -func (app *KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery { - resp := abcitypes.ResponseQuery{Key: req.Data} - - dbErr := app.db.View(func(txn *badger.Txn) error { - item, err := txn.Get(req.Data) - if err != nil { - if err != badger.ErrKeyNotFound { - return err - } - resp.Log = "key does not exist" - return nil - } - - return item.Value(func(val []byte) error { - resp.Log = "exists" - resp.Value = val - return nil - }) - }) - if dbErr != nil { - log.Panicf("Error reading database, unable to execute query: %v", dbErr) - } - return resp +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { + resp := abcitypes.ResponseQuery{Key: req.Data} + + dbErr := app.db.View(func(txn *badger.Txn) error { + item, err := txn.Get(req.Data) + if err != nil { + if err != badger.ErrKeyNotFound { + return err + } + resp.Log = "key does not exist" + return nil + } + + return item.Value(func(val []byte) error { + resp.Log = "exists" + resp.Value = val + return nil + }) + }) + if dbErr != nil { + log.Panicf("Error reading database, unable to execute query: %v", dbErr) + } + return &resp, nil } ``` @@ -465,14 +463,15 @@ included in blocks, it groups some of these transactions and then gives the appl to modify the group by invoking `PrepareProposal`. The application is free to modify the group before returning from the call, as long as the resulting set -does not use more bytes than `RequestPrepareProposal.max_tx_bytes' +does not use more bytes than `RequestPrepareProposal.max_tx_bytes` For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. 
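+
+As an illustration only, a hypothetical variant could drop transactions that
+fail the same stateless check used by `CheckTx` (this guide keeps the identity
+implementation shown next):
+
+```go
+// Hypothetical sketch: filter out transactions that fail app.isValid.
+func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
+	valid := make([][]byte, 0, len(proposal.Txs))
+	for _, tx := range proposal.Txs {
+		if app.isValid(tx) == 0 {
+			valid = append(valid, tx)
+		}
+	}
+	return &abcitypes.ResponsePrepareProposal{Txs: valid}, nil
+}
+```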
+ In the following code, the application simply returns the unmodified group of transactions: ```go -func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal { - return abcitypes.ResponsePrepareProposal{Txs: proposal.Txs} +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { + return &abcitypes.ResponsePrepareProposal{Txs: proposal.Txs}, nil } ``` @@ -481,17 +480,19 @@ its blessing before voting to accept the proposal. This mechanism may be used for different reasons, for example to deal with blocks manipulated by malicious nodes, in which case the block should not be considered valid. + The following code simply accepts all proposals: ```go -func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal { - return abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT} +func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { + return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil } ``` ## 1.4 Starting an application and a CometBFT instance in the same process -Now that we have the basic functionality of our application in place, let's put it all together inside of our main.go file. +Now that we have the basic functionality of our application in place, let's put +it all together inside of our `main.go` file. Change the contents of your `main.go` file to the following. @@ -499,103 +500,105 @@ Change the contents of your `main.go` file to the following. package main import ( - "flag" - "fmt" - "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/privval" - "github.com/cometbft/cometbft/proxy" - "log" - "os" - "os/signal" - "path/filepath" - "syscall" - - "github.com/dgraph-io/badger/v3" - "github.com/spf13/viper" - cfg "github.com/cometbft/cometbft/config" - cmtflags "github.com/cometbft/cometbft/libs/cli/flags" - cmtlog "github.com/cometbft/cometbft/libs/log" - nm "github.com/cometbft/cometbft/node" + "flag" + "fmt" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/privval" + "github.com/cometbft/cometbft/proxy" + "log" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/dgraph-io/badger/v3" + "github.com/spf13/viper" + cfg "github.com/cometbft/cometbft/config" + cmtflags "github.com/cometbft/cometbft/libs/cli/flags" + cmtlog "github.com/cometbft/cometbft/libs/log" + nm "github.com/cometbft/cometbft/node" ) var homeDir string func init() { - flag.StringVar(&homeDir, "cmt-home", "", "Path to the CometBFT config directory (if empty, uses $HOME/.cometbft)") + flag.StringVar(&homeDir, "cmt-home", "", "Path to the CometBFT config directory (if empty, uses $HOME/.cometbft)") } func main() { - flag.Parse() - if homeDir == "" { - homeDir = os.ExpandEnv("$HOME/.cometbft") - } - config := cfg.DefaultConfig() - - config.SetRoot(homeDir) - - viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) - if err := viper.ReadInConfig(); err != nil { - log.Fatalf("Reading config: %v", err) - } - if err := viper.Unmarshal(config); err != nil { - log.Fatalf("Decoding config: %v", err) - } - if err := config.ValidateBasic(); err != nil { - log.Fatalf("Invalid configuration data: %v", err) - } - - dbPath := filepath.Join(homeDir, "badger") - db, err := 
badger.Open(badger.DefaultOptions(dbPath)) - if err != nil { - log.Fatalf("Opening database: %v", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Printf("Closing database: %v", err) - } - }() - - app := NewKVStoreApplication(db) - - pv := privval.LoadFilePV( - config.PrivValidatorKeyFile(), - config.PrivValidatorStateFile(), - ) - - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) - if err != nil { - log.Fatalf("failed to load node's key: %v", err) - } - - logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) - logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel) - if err != nil { - log.Fatalf("failed to parse log level: %v", err) - } - - node, err := nm.NewNode( - config, - pv, - nodeKey, - proxy.NewLocalClientCreator(app), - nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, - nm.DefaultMetricsProvider(config.Instrumentation), - logger) - - if err != nil { - log.Fatalf("Creating node: %v", err) - } - - node.Start() - defer func() { - node.Stop() - node.Wait() - }() - - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - <-c + flag.Parse() + if homeDir == "" { + homeDir = os.ExpandEnv("$HOME/.cometbft") + } + + config := cfg.DefaultConfig() + config.SetRoot(homeDir) + viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) + + if err := viper.ReadInConfig(); err != nil { + log.Fatalf("Reading config: %v", err) + } + if err := viper.Unmarshal(config); err != nil { + log.Fatalf("Decoding config: %v", err) + } + if err := config.ValidateBasic(); err != nil { + log.Fatalf("Invalid configuration data: %v", err) + } + dbPath := filepath.Join(homeDir, "badger") + db, err := badger.Open(badger.DefaultOptions(dbPath)) + + if err != nil { + log.Fatalf("Opening database: %v", err) + } + defer func() { + if err := db.Close(); err != nil { + log.Printf("Closing database: %v", err) + } + }() + + app := NewKVStoreApplication(db) + + pv := privval.LoadFilePV( + config.PrivValidatorKeyFile(), + config.PrivValidatorStateFile(), + ) + + nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + if err != nil { + log.Fatalf("failed to load node's key: %v", err) + } + + logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) + logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel) + + if err != nil { + log.Fatalf("failed to parse log level: %v", err) + } + + node, err := nm.NewNode( + config, + pv, + nodeKey, + proxy.NewLocalClientCreator(app), + nm.DefaultGenesisDocProviderFunc(config), + cfg.DefaultDBProvider, + nm.DefaultMetricsProvider(config.Instrumentation), + logger, + ) + + if err != nil { + log.Fatalf("Creating node: %v", err) + } + + node.Start() + defer func() { + node.Stop() + node.Wait() + }() + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c } ``` @@ -603,39 +606,38 @@ This is a huge blob of code, so let's break it down into pieces. 
First, we use [viper](https://github.com/spf13/viper) to load the CometBFT configuration files, which we will generate later: - ```go - config := cfg.DefaultValidatorConfig() +config := cfg.DefaultValidatorConfig() - config.SetRoot(homeDir) +config.SetRoot(homeDir) - viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) - if err := viper.ReadInConfig(); err != nil { - log.Fatalf("Reading config: %v", err) - } - if err := viper.Unmarshal(config); err != nil { - log.Fatalf("Decoding config: %v", err) - } - if err := config.ValidateBasic(); err != nil { - log.Fatalf("Invalid configuration data: %v", err) - } +viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) +if err := viper.ReadInConfig(); err != nil { + log.Fatalf("Reading config: %v", err) +} +if err := viper.Unmarshal(config); err != nil { + log.Fatalf("Decoding config: %v", err) +} +if err := config.ValidateBasic(); err != nil { + log.Fatalf("Invalid configuration data: %v", err) +} ``` Next, we initialize the Badger database and create an app instance. ```go - dbPath := filepath.Join(homeDir, "badger") - db, err := badger.Open(badger.DefaultOptions(dbPath)) - if err != nil { - log.Fatalf("Opening database: %v", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Fatalf("Closing database: %v", err) - } - }() +dbPath := filepath.Join(homeDir, "badger") +db, err := badger.Open(badger.DefaultOptions(dbPath)) +if err != nil { + log.Fatalf("Opening database: %v", err) +} +defer func() { + if err := db.Close(); err != nil { + log.Fatalf("Closing database: %v", err) + } +}() - app := NewKVStoreApplication(db) +app := NewKVStoreApplication(db) ``` We use `FilePV`, which is a private validator (i.e. thing which signs consensus @@ -643,19 +645,19 @@ messages). Normally, you would use `SignerRemote` to connect to an external [HSM](https://kb.certus.one/hsm.html). ```go - pv := privval.LoadFilePV( - config.PrivValidatorKeyFile(), - config.PrivValidatorStateFile(), - ) +pv := privval.LoadFilePV( + config.PrivValidatorKeyFile(), + config.PrivValidatorStateFile(), +) ``` `nodeKey` is needed to identify the node in a p2p network. ```go - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) - if err != nil { - return nil, fmt.Errorf("failed to load node's key: %w", err) - } +nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) +if err != nil { + return nil, fmt.Errorf("failed to load node's key: %w", err) +} ``` Now we have everything set up to run the CometBFT node. We construct @@ -663,57 +665,57 @@ a node by passing it the configuration, the logger, a handle to our application the genesis information: ```go - node, err := nm.NewNode( - config, - pv, - nodeKey, - proxy.NewLocalClientCreator(app), - nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, - nm.DefaultMetricsProvider(config.Instrumentation), - logger) - - if err != nil { - log.Fatalf("Creating node: %v", err) - } +node, err := nm.NewNode( + config, + pv, + nodeKey, + proxy.NewLocalClientCreator(app), + nm.DefaultGenesisDocProviderFunc(config), + cfg.DefaultDBProvider, + nm.DefaultMetricsProvider(config.Instrumentation), + logger) + +if err != nil { + log.Fatalf("Creating node: %v", err) +} ``` Finally, we start the node, i.e., the CometBFT service inside our application: ```go - node.Start() - defer func() { - node.Stop() - node.Wait() - }() +node.Start() +defer func() { + node.Stop() + node.Wait() +}() ``` The additional logic at the end of the file allows the program to catch SIGTERM. 
This means that the node can shut down gracefully when an operator tries to kill the program:

 ```go
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-	<-c
+c := make(chan os.Signal, 1)
+signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+<-c
 ```

 ## 1.5 Initializing and Running

 Our application is almost ready to run, but first we'll need to populate the CometBFT configuration files.
 The following command will create a `cometbft-home` directory in your project and add a basic set of configuration files in `cometbft-home/config/`.
-For more information on what these files contain see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.37.x/docs/core/configuration.md).
+For more information on what these files contain see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/configuration.md).

 From the root of your project, run:

 ```bash
-go run github.com/cometbft/cometbft/cmd/cometbft@v0.37.0 init --home /tmp/cometbft-home
+go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 init --home /tmp/cometbft-home
 ```

 You should see an output similar to the following:

 ```bash
-I[2022-11-09|09:06:34.444] Generated private validator module=main keyFile=/tmp/cometbft-home/config/priv_validator_key.json stateFile=/tmp/cometbft-home/data/priv_validator_state.json
-I[2022-11-09|09:06:34.444] Generated node key module=main path=/tmp/cometbft-home/config/node_key.json
-I[2022-11-09|09:06:34.444] Generated genesis file module=main path=/tmp/cometbft-home/config/genesis.json
+I[2023-04-25|09:06:34.444] Generated private validator module=main keyFile=/tmp/cometbft-home/config/priv_validator_key.json stateFile=/tmp/cometbft-home/data/priv_validator_state.json
+I[2023-04-25|09:06:34.444] Generated node key module=main path=/tmp/cometbft-home/config/node_key.json
+I[2023-04-25|09:06:34.444] Generated genesis file module=main path=/tmp/cometbft-home/config/genesis.json
 ```

 Now rebuild the app:
@@ -731,23 +733,23 @@ Everything is now in place to run your application. Run:

 The application will start and you should see a continuous output starting with:

 ```bash
-badger 2022/11/09 09:08:50 INFO: All 0 tables opened in 0s
-badger 2022/11/09 09:08:50 INFO: Discard stats nextEmptySlot: 0
-badger 2022/11/09 09:08:50 INFO: Set nextTxnTs to 0
-I[2022-11-09|09:08:50.085] service start module=proxy msg="Starting multiAppConn service" impl=multiAppConn
-I[2022-11-09|09:08:50.085] service start module=abci-client connection=query msg="Starting localClient service" impl=localClient
-I[2022-11-09|09:08:50.085] service start module=abci-client connection=snapshot msg="Starting localClient service" impl=localClient
+badger 2023-04-25 09:08:50 INFO: All 0 tables opened in 0s
+badger 2023-04-25 09:08:50 INFO: Discard stats nextEmptySlot: 0
+badger 2023-04-25 09:08:50 INFO: Set nextTxnTs to 0
+I[2023-04-25|09:08:50.085] service start module=proxy msg="Starting multiAppConn service" impl=multiAppConn
+I[2023-04-25|09:08:50.085] service start module=abci-client connection=query msg="Starting localClient service" impl=localClient
+I[2023-04-25|09:08:50.085] service start module=abci-client connection=snapshot msg="Starting localClient service" impl=localClient
 ...
 ```

-More importantly, the application using CometBFT is producing blocks 🎉🎉 and you can see this reflected in the log output in lines like this:
+More importantly, the application using CometBFT is producing blocks 🎉🎉 and you can see this reflected in the log output in lines like this:

 ```bash
-I[2022-11-09|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2022-11-09T12:08:52.143393Z}"
-I[2022-11-09|09:08:52.152] received complete proposal block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C
-I[2022-11-09|09:08:52.160] finalizing commit of block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C root= num_txs=0
-I[2022-11-09|09:08:52.167] executed block module=state height=2 num_valid_txs=0 num_invalid_txs=0
-I[2022-11-09|09:08:52.171] committed state module=state height=2 num_txs=0 app_hash=
+I[2023-04-25|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2023-04-25T12:08:52.143393Z}"
+I[2023-04-25|09:08:52.152] received complete proposal block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C
+I[2023-04-25|09:08:52.160] finalizing commit of block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C root= num_txs=0
+I[2023-04-25|09:08:52.167] executed block module=state height=2 num_valid_txs=0 num_invalid_txs=0
+I[2023-04-25|09:08:52.171] committed state module=state height=2 num_txs=0 app_hash=
 ```

 The blocks, as you can see from the `num_valid_txs=0` part, are empty, but let's remedy that next.
@@ -757,7 +759,6 @@ The blocks, as you can see from the `num_valid_txs=0` part, are empty, but let's
 Let's try submitting a transaction to our new application.
 Open another terminal window and run the following curl command:

-
 ```bash
 curl -s 'localhost:26657/broadcast_tx_commit?tx="cometbft=rocks"'
 ```
@@ -777,8 +778,8 @@ The request returns a `json` object with a `key` and `value` field set.

 ```json
 ...
-      "key": "dGVuZGVybWludA==",
-      "value": "cm9ja3M=",
+	"key": "Y29tZXRiZnQ=",
+	"value": "cm9ja3M=",
 ...
 ```
@@ -789,11 +790,9 @@ The response contains a `base64` encoded representation of the data we submitted

 To get the original value out of this data, we can use the `base64` command line utility:

 ```bash
-echo cm9ja3M=" | base64 -d
+echo "cm9ja3M=" | base64 -d
 ```

 ## Outro

-I hope everything went smoothly and your first, but hopefully not the last,
-CometBFT application is up and running. If not, please [open an issue on
-Github](https://github.com/cometbft/cometbft/issues/new/choose).
+We hope everything ran smoothly. If you have any difficulties working through this tutorial, reach out to us on [Discord](https://discord.com/invite/interchain) or open a new [issue](https://github.com/cometbft/cometbft/issues/new/choose) on GitHub.
diff --git a/docs/guides/go.md b/docs/guides/go.md index 3edb91691e2..3d82e10f299 100644 --- a/docs/guides/go.md +++ b/docs/guides/go.md @@ -46,7 +46,7 @@ Verify that you have the latest version of Go installed (refer to the [official ```bash $ go version -go version go1.20.1 darwin/amd64 +go version go1.22.7 darwin/amd64 ``` ## 1.2 Creating a new Go project @@ -81,41 +81,48 @@ Hello, CometBFT We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management, so let's start by including a dependency on the latest version of -CometBFT, `v0.37.0` in this example. +CometBFT, `v0.38.0` in this example. ```bash go mod init kvstore -go get github.com/cometbft/cometbft@v0.37.0 +go get github.com/cometbft/cometbft@v0.38.0 ``` After running the above commands you will see two generated files, `go.mod` and `go.sum`. The go.mod file should look similar to: ```go -module github.com/me/example +module kvstore -go 1.20 +go 1.22 require ( - github.com/cometbft/cometbft v0.37.0 +github.com/cometbft/cometbft v0.38.0 ) ``` +XXX: CometBFT `v0.38.0` uses a slightly outdated `gogoproto` library, which +may fail to compile with newer Go versions. To avoid any compilation errors, +upgrade `gogoproto` manually: + +```bash +go get github.com/cosmos/gogoproto@v1.4.11 +``` + As you write the kvstore application, you can rebuild the binary by pulling any new dependencies and recompiling it. -```sh +```bash go get go build ``` - ## 1.3 Writing a CometBFT application CometBFT communicates with the application through the Application BlockChain Interface (ABCI). The messages exchanged through the interface are defined in the ABCI [protobuf -file](https://github.com/cometbft/cometbft/blob/v0.37.x/proto/tendermint/abci/types.proto). +file](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto). 
We begin by creating the basic scaffolding for an ABCI application by
creating a new type, `KVStoreApplication`, which implements the
@@ -127,7 +134,8 @@ Create a file called `app.go` with the following contents:

 ```go
 package main

 import (
-	abcitypes "github.com/cometbft/cometbft/abci/types"
+	"context"
+
+	abcitypes "github.com/cometbft/cometbft/abci/types"
 )

 type KVStoreApplication struct{}
@@ -135,63 +143,64 @@ type KVStoreApplication struct{}

 var _ abcitypes.Application = (*KVStoreApplication)(nil)

 func NewKVStoreApplication() *KVStoreApplication {
-	return &KVStoreApplication{}
+	return &KVStoreApplication{}
 }

-func (app *KVStoreApplication) Info(info abcitypes.RequestInfo) abcitypes.ResponseInfo {
-	return abcitypes.ResponseInfo{}
+func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) {
+	return &abcitypes.ResponseInfo{}, nil
 }

-func (app *KVStoreApplication) Query(query abcitypes.RequestQuery) abcitypes.ResponseQuery {
-	return abcitypes.ResponseQuery{}
+func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) {
+	return &abcitypes.ResponseQuery{}, nil
 }

-func (app *KVStoreApplication) CheckTx(tx abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
-	return abcitypes.ResponseCheckTx{}
+func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) {
+	return &abcitypes.ResponseCheckTx{}, nil
 }

-func (app *KVStoreApplication) InitChain(chain abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
-	return abcitypes.ResponseInitChain{}
+func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) {
+	return &abcitypes.ResponseInitChain{}, nil
 }

-func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
-	return abcitypes.ResponsePrepareProposal{}
+func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
+	return &abcitypes.ResponsePrepareProposal{}, nil
 }

-func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal {
-	return abcitypes.ResponseProcessProposal{}
+func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) {
+	return &abcitypes.ResponseProcessProposal{}, nil
 }

-func (app *KVStoreApplication) BeginBlock(block abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
-	return abcitypes.ResponseBeginBlock{}
+func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
+	return &abcitypes.ResponseFinalizeBlock{}, nil
 }

-func (app *KVStoreApplication) DeliverTx(tx abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
-	return abcitypes.ResponseDeliverTx{}
+func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
+	return &abcitypes.ResponseCommit{}, nil
 }

-func (app *KVStoreApplication) EndBlock(block abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
-	return abcitypes.ResponseEndBlock{}
+func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.RequestListSnapshots) (*abcitypes.ResponseListSnapshots,
error) { + return &abcitypes.ResponseListSnapshots{}, nil } -func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { - return abcitypes.ResponseCommit{} +func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.RequestOfferSnapshot) (*abcitypes.ResponseOfferSnapshot, error) { + return &abcitypes.ResponseOfferSnapshot{}, nil } -func (app *KVStoreApplication) ListSnapshots(snapshots abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots { - return abcitypes.ResponseListSnapshots{} +func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.RequestLoadSnapshotChunk) (*abcitypes.ResponseLoadSnapshotChunk, error) { + return &abcitypes.ResponseLoadSnapshotChunk{}, nil } -func (app *KVStoreApplication) OfferSnapshot(snapshot abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot { - return abcitypes.ResponseOfferSnapshot{} +func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.RequestApplySnapshotChunk) (*abcitypes.ResponseApplySnapshotChunk, error) { + + return &abcitypes.ResponseApplySnapshotChunk{Result: abcitypes.ResponseApplySnapshotChunk_ACCEPT}, nil } -func (app *KVStoreApplication) LoadSnapshotChunk(chunk abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk { - return abcitypes.ResponseLoadSnapshotChunk{} +func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) { + return &abcitypes.ResponseExtendVote{}, nil } -func (app *KVStoreApplication) ApplySnapshotChunk(chunk abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk { - return abcitypes.ResponseApplySnapshotChunk{} +func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) { + return &abcitypes.ResponseVerifyVoteExtension{}, nil } ``` @@ -199,7 +208,7 @@ The types used here are defined in the CometBFT library and were added as a depe to the project when you ran `go get`. If your IDE is not recognizing the types, go ahead and run the command again. ```bash -go get github.com/cometbft/cometbft@v0.37.0 +go get github.com/cometbft/cometbft@v0.38.0 ``` Now go back to the `main.go` and modify the `main` function so it matches the following, @@ -218,7 +227,6 @@ not do anything. So let's revisit the code adding the logic needed to implement our minimal key/value store and to start it along with the CometBFT Service. 
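+
+A side note on the `var _ abcitypes.Application = (*KVStoreApplication)(nil)`
+line in the scaffolding above: it is a compile-time assertion, so a missing or
+mistyped ABCI method surfaces as a build error rather than a runtime failure.
+The same idiom works for any interface:
+
+```go
+// Compile-time check that *KVStoreApplication satisfies abcitypes.Application.
+var _ abcitypes.Application = (*KVStoreApplication)(nil)
+```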
- ### 1.3.1 Add a persistent data store Our application will need to write its state out to persistent storage so that it @@ -235,14 +243,14 @@ Next, let's update the application and its constructor to receive a handle to th ```go type KVStoreApplication struct { - db *badger.DB - onGoingBlock *badger.Txn + db *badger.DB + onGoingBlock *badger.Txn } var _ abcitypes.Application = (*KVStoreApplication)(nil) func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{db: db} + return &KVStoreApplication{db: db} } ``` @@ -253,15 +261,15 @@ Next, update the `import` stanza at the top to include the Badger library: ```go import( - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` Finally, update the `main.go` file to invoke the updated constructor: ```go - _ = NewKVStoreApplication(nil) +_ = NewKVStoreApplication(nil) ``` ### 1.3.2 CheckTx @@ -277,22 +285,21 @@ For that, let's add the following helper method to app.go: ```go func (app *KVStoreApplication) isValid(tx []byte) uint32 { - // check format - parts := bytes.Split(tx, []byte("=")) - if len(parts) != 2 { - return 1 - } - - return 0 + // check format + parts := bytes.Split(tx, []byte("=")) + if len(parts) != 2 { + return 1 + } + return 0 } ``` Now you can rewrite the `CheckTx` method to use the helper function: ```go -func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx { - code := app.isValid(req.Tx) - return abcitypes.ResponseCheckTx{Code: code} +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { + code := app.isValid(check.Tx) + return &abcitypes.ResponseCheckTx{Code: code}, nil } ``` @@ -311,95 +318,81 @@ information on why the transaction was rejected. Note that `CheckTx` does not execute the transaction, it only verifies that that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block. - Finally, make sure to add the bytes package to the `import` stanza at the top of `app.go`: ```go import( - "bytes" + "bytes" - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` - -### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 FinalizeBlock When the CometBFT consensus engine has decided on the block, the block is transferred to the -application over three ABCI method calls: `BeginBlock`, `DeliverTx`, and `EndBlock`. - -- `BeginBlock` is called once to indicate to the application that it is about to -receive a block. -- `DeliverTx` is called repeatedly, once for each application transaction that was included in the block. -- `EndBlock` is called once to indicate to the application that no more transactions -will be delivered to the application in within this block. +application via the `FinalizeBlock` method. +`FinalizeBlock` is an ABCI method introduced in CometBFT `v0.38.0`. This replaces the functionality provided previously (pre-`v0.38.0`) by the combination of ABCI methods `BeginBlock`, `DeliverTx`, and `EndBlock`. +`FinalizeBlock`'s parameters are an aggregation of those in `BeginBlock`, `DeliverTx`, and `EndBlock`. -Note that, to implement these calls in our application we're going to make use of Badger's -transaction mechanism. 
We will always refer to these as Badger transactions, not to
-confuse them with the transactions included in the blocks delivered by CometBFT,
-the _application transactions_.
+This method is responsible for executing the block and returning a response to the consensus engine.
+Providing a single `FinalizeBlock` method to signal the finalization of a block simplifies the ABCI interface and increases flexibility in the execution pipeline.
-First, let's create a new Badger transaction during `BeginBlock`. All application transactions in the
-current block will be executed within this Badger transaction.
-Then, return informing CometBFT that the application is ready to receive application transactions:
+The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `ResponseFinalizeBlock` object, which contains any necessary information about the executed block.
-```go
-func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
-	app.onGoingBlock = app.db.NewTransaction(true)
-	return abcitypes.ResponseBeginBlock{}
-}
-```
+**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage, i.e., in the `Commit` phase.
-Next, let's modify `DeliverTx` to add the `key` and `value` to the database transaction every time our application
-receives a new application transaction through `RequestDeliverTx`.
+Note that, to implement these calls in our application, we're going to make use of Badger's transaction mechanism. We will always refer to these as Badger transactions, not to confuse them with the transactions included in the blocks delivered by CometBFT, the _application transactions_.
-```go
-func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
-	if code := app.isValid(req.Tx); code != 0 {
-		return abcitypes.ResponseDeliverTx{Code: code}
-	}
+First, let's create a new Badger transaction during `FinalizeBlock`. All application transactions in the current block will be executed within this Badger transaction.
+Next, let's modify `FinalizeBlock` to add the `key` and `value` to the database transaction every time our application processes a new application transaction from the list received through `RequestFinalizeBlock`.
-	parts := bytes.SplitN(req.Tx, []byte("="), 2)
-	key, value := parts[0], parts[1]
+Note that we check the validity of the transaction _again_ during `FinalizeBlock`.
-	if err := app.onGoingBlock.Set(key, value); err != nil {
-		log.Panicf("Error writing to database, unable to execute tx: %v", err)
-	}
-
-	return abcitypes.ResponseDeliverTx{Code: 0}
+```go
+func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
+	var txs = make([]*abcitypes.ExecTxResult, len(req.Txs))
+
+	app.onGoingBlock = app.db.NewTransaction(true)
+	for i, tx := range req.Txs {
+		if code := app.isValid(tx); code != 0 {
+			log.Printf("Error: invalid transaction index %v", i)
+			txs[i] = &abcitypes.ExecTxResult{Code: code}
+		} else {
+			parts := bytes.SplitN(tx, []byte("="), 2)
+			key, value := parts[0], parts[1]
+			log.Printf("Adding key %s with value %s", key, value)
+
+			if err := app.onGoingBlock.Set(key, value); err != nil {
+				log.Panicf("Error writing to database, unable to execute tx: %v", err)
+			}
+			log.Printf("Successfully added key %s with value %s", key, value)
+
+			txs[i] = &abcitypes.ExecTxResult{}
+		}
+	}
+
+	return &abcitypes.ResponseFinalizeBlock{
+		TxResults: txs,
+	}, nil
 }
 ```

-Note that we check the validity of the transaction _again_ during `DeliverTx`.
-Transactions are not guaranteed to be valid when they are delivered to an
-application, even if they were valid when they were proposed.
-This can happen if the application state is used to determine transaction
-validity. Application state may have changed between the initial execution of `CheckTx`
-and the transaction delivery in `DeliverTx` in a way that rendered the transaction
-no longer valid.
+Transactions are not guaranteed to be valid when they are delivered to an application, even if they were valid when they were proposed.
+
+This can happen if the application state is used to determine transaction validity. The application state may have changed between the initial execution of `CheckTx` and the transaction delivery in `FinalizeBlock` in a way that rendered the transaction no longer valid.
-`EndBlock` is called to inform the application that the full block has been delivered
-and give the application a chance to perform any other computation needed, before the
-effects of the transactions become permanent.
+**Note** that `FinalizeBlock` cannot yet commit the Badger transaction we were building during the block execution.
-Note that `EndBlock` **cannot** yet commit the Badger transaction we were building
-in during `DeliverTx`.
-Since other methods, such as `Query`, rely on a consistent view of the application's
-state, the application should only update its state by committing the Badger transactions
-when the full block has been delivered and the `Commit` method is invoked.
+Other methods, such as `Query`, rely on a consistent view of the application's state, so the application should only update its state by committing the Badger transactions when the full block has been delivered and the `Commit` method is invoked.
-The `Commit` method tells the application to make permanent the effects of
-the application transactions.
-Let's update the method to terminate the pending Badger transaction and
-persist the resulting state:
+The `Commit` method tells the application to make permanent the effects of the application transactions.
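+
+One v0.38 detail worth noting as an aside (not used in this guide):
+`ResponseCommit` no longer carries an app hash; that moved to the response of
+`FinalizeBlock`. The main field left on `ResponseCommit` is `RetainHeight`,
+with which an application can ask CometBFT to prune blocks below a given
+height, e.g.:
+
+```go
+// Hypothetical: allow CometBFT to prune all blocks below height 1000.
+resp := &abcitypes.ResponseCommit{RetainHeight: 1000}
+```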
+Let's update the method to terminate the pending Badger transaction and persist the resulting state: ```go -func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { - if err := app.onGoingBlock.Commit(); err != nil { - log.Panicf("Error writing to database, unable to commit block: %v", err) - } - return abcitypes.ResponseCommit{Data: []byte{}} +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { + return &abcitypes.ResponseCommit{}, app.onGoingBlock.Commit() } ``` @@ -407,16 +400,16 @@ Finally, make sure to add the log library to the `import` stanza as well: ```go import ( - "bytes" - "log" + "bytes" + "log" - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v3" + abcitypes "github.com/cometbft/cometbft/abci/types" ) ``` You may have noticed that the application we are writing will crash if it receives -an unexpected error from the Badger database during the `DeliverTx` or `Commit` methods. +an unexpected error from the Badger database during the `FinalizeBlock` or `Commit` methods. This is not an accident. If the application received an error from the database, there is no deterministic way for it to make progress so the only safe option is to terminate. @@ -426,29 +419,29 @@ When a client tries to read some information from the `kvstore`, the request wil handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`: ```go -func (app *KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery { - resp := abcitypes.ResponseQuery{Key: req.Data} - - dbErr := app.db.View(func(txn *badger.Txn) error { - item, err := txn.Get(req.Data) - if err != nil { - if err != badger.ErrKeyNotFound { - return err - } - resp.Log = "key does not exist" - return nil - } - - return item.Value(func(val []byte) error { - resp.Log = "exists" - resp.Value = val - return nil - }) - }) - if dbErr != nil { - log.Panicf("Error reading database, unable to execute query: %v", dbErr) - } - return resp +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { + resp := abcitypes.ResponseQuery{Key: req.Data} + + dbErr := app.db.View(func(txn *badger.Txn) error { + item, err := txn.Get(req.Data) + if err != nil { + if err != badger.ErrKeyNotFound { + return err + } + resp.Log = "key does not exist" + return nil + } + + return item.Value(func(val []byte) error { + resp.Log = "exists" + resp.Value = val + return nil + }) + }) + if dbErr != nil { + log.Panicf("Error reading database, unable to execute query: %v", dbErr) + } + return &resp, nil } ``` @@ -465,33 +458,36 @@ included in blocks, it groups some of these transactions and then gives the appl to modify the group by invoking `PrepareProposal`. The application is free to modify the group before returning from the call, as long as the resulting set -does not use more bytes than `RequestPrepareProposal.max_tx_bytes' +does not use more bytes than `RequestPrepareProposal.max_tx_bytes`. For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. 
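+
+For instance, a hypothetical variant (not what this guide does) could keep
+transactions in order but stop once the total size reaches the limit CometBFT
+passes in via `RequestPrepareProposal.MaxTxBytes`:
+
+```go
+// Hypothetical sketch: respect the proposer's byte budget explicitly.
+func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
+	var total int64
+	txs := make([][]byte, 0, len(proposal.Txs))
+	for _, tx := range proposal.Txs {
+		if total+int64(len(tx)) > proposal.MaxTxBytes {
+			break
+		}
+		total += int64(len(tx))
+		txs = append(txs, tx)
+	}
+	return &abcitypes.ResponsePrepareProposal{Txs: txs}, nil
+}
+```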
+ In the following code, the application simply returns the unmodified group of transactions: ```go -func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal { - return abcitypes.ResponsePrepareProposal{Txs: proposal.Txs} +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { + return &abcitypes.ResponsePrepareProposal{Txs: proposal.Txs}, nil } ``` -Once a proposed block is received by a node, the proposal is passed to the application to give -its blessing before voting to accept the proposal. +Once a proposed block is received by a node, the proposal is passed to the +application to determine its validity before voting to accept the proposal. This mechanism may be used for different reasons, for example to deal with blocks manipulated by malicious nodes, in which case the block should not be considered valid. + The following code simply accepts all proposals: ```go -func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal { - return abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT} +func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { + return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil } ``` ## 1.4 Starting an application and a CometBFT instance -Now that we have the basic functionality of our application in place, let's put it all together inside of our `main.go` file. +Now that we have the basic functionality of our application in place, let's put +it all together inside of our `main.go` file. Change the contents of your `main.go` file to the following. @@ -499,60 +495,61 @@ Change the contents of your `main.go` file to the following. 
package main import ( - "flag" - "fmt" - abciserver "github.com/cometbft/cometbft/abci/server" - "log" - "os" - "os/signal" - "path/filepath" - "syscall" - - "github.com/dgraph-io/badger/v3" - cmtlog "github.com/cometbft/cometbft/libs/log" + "flag" + "fmt" + abciserver "github.com/cometbft/cometbft/abci/server" + "log" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/dgraph-io/badger/v3" + cmtlog "github.com/cometbft/cometbft/libs/log" ) var homeDir string var socketAddr string func init() { - flag.StringVar(&homeDir, "kv-home", "", "Path to the kvstore directory (if empty, uses $HOME/.kvstore)") - flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address (if empty, uses \"unix://example.sock\"") + flag.StringVar(&homeDir, "kv-home", "", "Path to the kvstore directory (if empty, uses $HOME/.kvstore)") + flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address (if empty, uses \"unix://example.sock\"") } func main() { - flag.Parse() - if homeDir == "" { - homeDir = os.ExpandEnv("$HOME/.kvstore") - } - - dbPath := filepath.Join(homeDir, "badger") - db, err := badger.Open(badger.DefaultOptions(dbPath)) - if err != nil { - log.Fatalf("Opening database: %v", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Fatalf("Closing database: %v", err) - } - }() - - app := NewKVStoreApplication(db) - - logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) - - server := abciserver.NewSocketServer(socketAddr, app) - server.SetLogger(logger) - - if err := server.Start(); err != nil { - fmt.Fprintf(os.Stderr, "error starting socket server: %v", err) - os.Exit(1) - } - defer server.Stop() - - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - <-c + flag.Parse() + if homeDir == "" { + homeDir = os.ExpandEnv("$HOME/.kvstore") + } + + dbPath := filepath.Join(homeDir, "badger") + db, err := badger.Open(badger.DefaultOptions(dbPath)) + if err != nil { + log.Fatalf("Opening database: %v", err) + } + + defer func() { + if err := db.Close(); err != nil { + log.Fatalf("Closing database: %v", err) + } + }() + + app := NewKVStoreApplication(db) + logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) + + server := abciserver.NewSocketServer(socketAddr, app) + server.SetLogger(logger) + + if err := server.Start(); err != nil { + fmt.Fprintf(os.Stderr, "error starting socket server: %v", err) + + os.Exit(1) + } + defer server.Stop() + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c } ``` @@ -561,18 +558,18 @@ This is a huge blob of code, so let's break it down into pieces. First, we initialize the Badger database and create an app instance: ```go - dbPath := filepath.Join(homeDir, "badger") - db, err := badger.Open(badger.DefaultOptions(dbPath)) - if err != nil { - log.Fatalf("Opening database: %v", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Fatalf("Closing database: %v", err) - } - }() +dbPath := filepath.Join(homeDir, "badger") +db, err := badger.Open(badger.DefaultOptions(dbPath)) +if err != nil { + log.Fatalf("Opening database: %v", err) +} +defer func() { + if err := db.Close(); err != nil { + log.Fatalf("Closing database: %v", err) + } +}() - app := NewKVStoreApplication(db) +app := NewKVStoreApplication(db) ``` Then we start the ABCI server and add some signal handling to gracefully stop @@ -580,38 +577,38 @@ it upon receiving SIGTERM or Ctrl-C. 
CometBFT will act as a client, which connects to our server and sends us transactions and other messages.

```go
-    server := abciserver.NewSocketServer(socketAddr, app)
-    server.SetLogger(logger)
+server := abciserver.NewSocketServer(socketAddr, app)
+server.SetLogger(logger)

-    if err := server.Start(); err != nil {
-        fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
-        os.Exit(1)
-    }
-    defer server.Stop()
+if err := server.Start(); err != nil {
+    fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
+    os.Exit(1)
+}
+defer server.Stop()

-    c := make(chan os.Signal, 1)
-    signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-    <-c
+c := make(chan os.Signal, 1)
+signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+<-c
```

## 1.5 Initializing and Running

Our application is almost ready to run, but first we'll need to populate the CometBFT configuration files.
The following command will create a `cometbft-home` directory in your project and add a basic set of configuration files in `cometbft-home/config/`.
-For more information on what these files contain see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.37.x/docs/core/configuration.md).
+For more information on what these files contain, see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/configuration.md).

From the root of your project, run:

```bash
-go run github.com/cometbft/cometbft/cmd/cometbft@v0.37.0 init --home /tmp/cometbft-home
+go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 init --home /tmp/cometbft-home
```

You should see an output similar to the following:

```bash
-I[2022-11-09|09:06:34.444] Generated private validator module=main keyFile=/tmp/cometbft-home/config/priv_validator_key.json stateFile=/tmp/cometbft-home/data/priv_validator_state.json
-I[2022-11-09|09:06:34.444] Generated node key module=main path=/tmp/cometbft-home/config/node_key.json
-I[2022-11-09|09:06:34.444] Generated genesis file module=main path=/tmp/cometbft-home/config/genesis.json
+I[2023-04-25|09:06:34.444] Generated private validator module=main keyFile=/tmp/cometbft-home/config/priv_validator_key.json stateFile=/tmp/cometbft-home/data/priv_validator_state.json
+I[2023-04-25|09:06:34.444] Generated node key module=main path=/tmp/cometbft-home/config/node_key.json
+I[2023-04-25|09:06:34.444] Generated genesis file module=main path=/tmp/cometbft-home/config/genesis.json
```

Now rebuild the app:
@@ -629,11 +626,11 @@ Everything is now in place to run your application. Run:

The application will start and you should see an output similar to the following:

```bash
-badger 2022/11/09 17:01:28 INFO: All 0 tables opened in 0s
-badger 2022/11/09 17:01:28 INFO: Discard stats nextEmptySlot: 0
-badger 2022/11/09 17:01:28 INFO: Set nextTxnTs to 0
-I[2022-11-09|17:01:28.726] service start msg="Starting ABCIServer service" impl=ABCIServer
-I[2022-11-09|17:01:28.726] Waiting for new connection...
+badger 2023-04-25 17:01:28 INFO: All 0 tables opened in 0s
+badger 2023-04-25 17:01:28 INFO: Discard stats nextEmptySlot: 0
+badger 2023-04-25 17:01:28 INFO: Set nextTxnTs to 0
+I[2023-04-25|17:01:28.726] service start msg="Starting ABCIServer service" impl=ABCIServer
+I[2023-04-25|17:01:28.726] Waiting for new connection...
```

Then we need to start the CometBFT service and point it to our application.
@@ -641,29 +638,30 @@ Open a new terminal window and cd to the same folder where the app is running.
Then execute the following command:

```bash
-go run github.com/cometbft/cometbft/cmd/cometbft@v0.37.0 node --home /tmp/cometbft-home --proxy_app=unix://example.sock
+go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 node --home /tmp/cometbft-home --proxy_app=unix://example.sock
```

This should start the full node and connect to our ABCI application, which will be reflected in the application output.

```sh
-I[2022-11-09|17:07:08.124] service start msg="Starting ABCIServer service" impl=ABCIServer
-I[2022-11-09|17:07:08.124] Waiting for new connection...
-I[2022-11-09|17:08:12.702] Accepted a new connection
-I[2022-11-09|17:08:12.703] Waiting for new connection...
-I[2022-11-09|17:08:12.703] Accepted a new connection
-I[2022-11-09|17:08:12.703] Waiting for new connection...
+I[2023-04-25|17:07:08.124] service start msg="Starting ABCIServer service" impl=ABCIServer
+I[2023-04-25|17:07:08.124] Waiting for new connection...
+I[2023-04-25|17:08:12.702] Accepted a new connection
+I[2023-04-25|17:08:12.703] Waiting for new connection...
+I[2023-04-25|17:08:12.703] Accepted a new connection
+I[2023-04-25|17:08:12.703] Waiting for new connection...
```

Also, the application using CometBFT Core is producing blocks 🎉🎉 and you can see this reflected in the log output of the service in lines like this:

```bash
-I[2022-11-09|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2022-11-09T12:08:52.143393Z}"
-I[2022-11-09|09:08:52.152] received complete proposal block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C
-I[2022-11-09|09:08:52.160] finalizing commit of block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C root= num_txs=0
-I[2022-11-09|09:08:52.167] executed block module=state height=2 num_valid_txs=0 num_invalid_txs=0
-I[2022-11-09|09:08:52.171] committed state module=state height=2 num_txs=0 app_hash=
+I[2023-04-25|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2023-04-25T12:08:52.143393Z}"
+I[2023-04-25|09:08:52.152] received complete proposal block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C
+I[2023-04-25|09:08:52.160] finalizing commit of block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C root= num_txs=0
+I[2023-04-25|09:08:52.167] executed block module=state height=2 num_valid_txs=0 num_invalid_txs=0
+I[2023-04-25|09:08:52.171] committed state module=state height=2 num_txs=0 app_hash=
```

The blocks, as you can see from the `num_valid_txs=0` part, are empty, but let's remedy that next.
@@ -673,7 +671,6 @@

Let's try submitting a transaction to our new application.
Open another terminal window and run the following curl command:
-
```bash
curl -s 'localhost:26657/broadcast_tx_commit?tx="cometbft=rocks"'
```

The request returns a `json` object with a `key` and `value` field set.

```json
...
- "key": "dGVuZGVybWludA==",
- "value": "cm9ja3M=",
+ "key": "Y29tZXRiZnQ=",
+ "value": "cm9ja3M=",
...
```

@@ -705,11 +702,9 @@ The response contains a `base64` encoded representation of the data we submitted.

To get the original value out of this data, we can use the `base64` command line utility:

```bash
-echo cm9ja3M=" | base64 -d
+echo "cm9ja3M=" | base64 -d
```

## Outro

-I hope everything went smoothly and your first, but hopefully not the last,
-CometBFT application is up and running. If not, please [open an issue on
-Github](https://github.com/cometbft/cometbft/issues/new/choose).
+We hope everything went smoothly and that your first CometBFT application is up
+and running. If you had any difficulties running through this tutorial, reach
+out to us via [Discord](https://discord.com/invite/interchain) or open a new
+[issue](https://github.com/cometbft/cometbft/issues/new/choose) on GitHub.
diff --git a/docs/guides/install.md b/docs/guides/install.md
index 366c0c90a27..c7eaff20a50 100644
--- a/docs/guides/install.md
+++ b/docs/guides/install.md
@@ -4,6 +4,20 @@ order: 3

# Install CometBFT

+## From Go Package
+
+Install the latest version of CometBFT's Go package:
+
+```sh
+go install github.com/cometbft/cometbft/cmd/cometbft@latest
+```
+
+Install a specific version of CometBFT's Go package:
+
+```sh
+go install github.com/cometbft/cometbft/cmd/cometbft@v0.38
+```
+
## From Binary

To download pre-built binaries, see the [releases page](https://github.com/cometbft/cometbft/releases).
@@ -51,15 +65,6 @@ running:

cometbft version
```

-## Run
-
-To start a one-node blockchain with a simple in-process application:
-
-```sh
-cometbft init
-cometbft node --proxy_app=kvstore
-```
-
## Reinstall

If you already have CometBFT installed, and you make updates, simply
@@ -87,15 +92,15 @@ sudo apt install build-essential

sudo apt-get install libsnappy-dev

-wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \
-  tar -zxvf v1.20.tar.gz && \
-  cd leveldb-1.20/ && \
+wget https://github.com/google/leveldb/archive/v1.23.tar.gz && \
+  tar -zxvf v1.23.tar.gz && \
+  cd leveldb-1.23/ && \
  make && \
  sudo cp -r out-static/lib* out-shared/lib* /usr/local/lib/ && \
  cd include/ && \
  sudo cp -r leveldb /usr/local/include/ && \
  sudo ldconfig && \
-  rm -f v1.20.tar.gz
+  rm -f v1.23.tar.gz
```

Set a database backend to `cleveldb`:
diff --git a/docs/guides/quick-start.md b/docs/guides/quick-start.md
index b0eecf25187..d5f1481710c 100644
--- a/docs/guides/quick-start.md
+++ b/docs/guides/quick-start.md
@@ -108,6 +108,46 @@ cometbft show_node_id --home ./mytestnet/node2
cometbft show_node_id --home ./mytestnet/node3
```

+Here's a handy Bash script to compile the persistent peers string, which will
+be needed for our next step:
+
+```bash
+#!/bin/bash
+
+# Check that at least one IP address is provided
+if [ $# -eq 0 ]; then
+  echo "Usage: $0 <IP1> <IP2> ..."
+  exit 1
+fi
+
+# Base command used to obtain each node's ID
+BASE_COMMAND="cometbft show_node_id --home ./mytestnet/node"
+
+# Initialize the persistent peers string
+PERSISTENT_PEERS=""
+
+# Iterate through the provided IPs by positional index
+for ((i = 1; i <= $#; i++)); do
+  IP="${!i}"
+  NODE_IDX=$((i - 1)) # Adjust for zero-based indexing
+
+  echo "Getting ID of $IP (node $NODE_IDX)..."
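+
+  # "$BASE_COMMAND$NODE_IDX" below concatenates into a command such as
+  # "cometbft show_node_id --home ./mytestnet/node0" for the first IP.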
+
+  # Run the show_node_id command for this node and capture its output
+  ID=$($BASE_COMMAND$NODE_IDX)
+
+  # Append this node's peer address to the persistent peers string
+  PERSISTENT_PEERS+="$ID@$IP:26656"
+
+  # Add a comma if not the last IP
+  if [ $i -lt $# ]; then
+    PERSISTENT_PEERS+=","
+  fi
+done
+
+echo "$PERSISTENT_PEERS"
+```
+
Finally, from each machine, run:

```sh
diff --git a/docs/imgs/light_client_bisection_alg.png b/docs/imgs/light_client_bisection_alg.png
index 2a12c7542e5..a960ee69f88 100644
Binary files a/docs/imgs/light_client_bisection_alg.png and b/docs/imgs/light_client_bisection_alg.png differ
diff --git a/docs/imgs/sentry_layout.png b/docs/imgs/sentry_layout.png
index 240abde18fa..7d7dff44d6d 100644
Binary files a/docs/imgs/sentry_layout.png and b/docs/imgs/sentry_layout.png differ
diff --git a/docs/imgs/sentry_local_config.png b/docs/imgs/sentry_local_config.png
index 050a6df2fac..4fdb2fe580a 100644
Binary files a/docs/imgs/sentry_local_config.png and b/docs/imgs/sentry_local_config.png differ
diff --git a/docs/introduction/README.md b/docs/introduction/README.md
index 1c2b5850b38..0eaf05f751e 100644
--- a/docs/introduction/README.md
+++ b/docs/introduction/README.md
@@ -1,5 +1,5 @@
---
-order: false
+order: 1
parent:
  title: Introduction
  order: 1
@@ -126,7 +126,7 @@ consensus engine, and provides a particular application state.

## ABCI Overview

The [Application BlockChain Interface
-(ABCI)](https://github.com/cometbft/cometbft/tree/main/abci)
+(ABCI)](https://github.com/cometbft/cometbft/tree/v0.38.x/abci)
allows for Byzantine Fault Tolerant replication of applications
written in any programming language.
@@ -191,19 +191,18 @@ core to the application. The application replies with corresponding
response messages. The messages are specified here: [ABCI Message
-Types](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto).
+Types](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto).

-The **DeliverTx** message is the work horse of the application. Each
-transaction in the blockchain is delivered with this message. The
+The **FinalizeBlock** message is the workhorse of the application. Each
+transaction in the blockchain is finalized within this message. The
application needs to validate each transaction received with the
-**DeliverTx** message against the current state, application protocol,
-and the cryptographic credentials of the transaction. A validated
-transaction then needs to update the application state — by binding a
-value into a key values store, or by updating the UTXO database, for
-instance.
-
-The **CheckTx** message is similar to **DeliverTx**, but it's only for
-validating transactions. CometBFT's mempool first checks the
+**FinalizeBlock** message against the current state, application protocol,
+and the cryptographic credentials of the transaction. FinalizeBlock only
+prepares the update to be made and does not change the state of the application.
+The state change is actually committed at a later stage, i.e., in the commit phase.
+
+The **CheckTx** message is used for validating transactions.
+CometBFT's mempool first checks the
validity of a transaction with **CheckTx**, and only relays valid
transactions to its peers. For instance, an application may check an
incrementing sequence number in the transaction and return an error upon
@@ -220,10 +219,10 @@ lightweight clients, as Merkle-hash proofs can be verified by checking
against the block hash, and that the block hash is signed by a quorum.
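+
+To make the sequence-number example above concrete, here is a minimal,
+hypothetical sketch of such a `CheckTx` handler. It assumes the Go ABCI types
+from `github.com/cometbft/cometbft/abci/types` (imported as `abcitypes`) and
+`context`; the `Application` struct, its `lastSeq` field, and the `parseSeq`
+helper are illustrative only, not part of any CometBFT API:
+
+```go
+func (app *Application) CheckTx(_ context.Context, req *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) {
+    // parseSeq is a hypothetical helper that extracts the sender's
+    // sequence number from the raw transaction bytes.
+    seq, err := parseSeq(req.Tx)
+    if err != nil {
+        return &abcitypes.ResponseCheckTx{Code: 1}, nil // malformed transaction
+    }
+    // Reject stale transactions so the mempool never relays them to peers.
+    if seq <= app.lastSeq {
+        return &abcitypes.ResponseCheckTx{Code: 2}, nil
+    }
+    return &abcitypes.ResponseCheckTx{Code: 0}, nil // valid: may enter the mempool
+}
+```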
There can be multiple ABCI socket connections to an application.
-CometBFT creates three ABCI connections to the application; one
-for the validation of transactions when broadcasting in the mempool, one
-for the consensus engine to run block proposals, and one more for
-querying the application state.
+CometBFT creates four ABCI connections to the application: one
+for the validation of transactions when broadcasting in the mempool, one for
+the consensus engine to run block proposals, one for creating snapshots of the
+application state, and one more for querying the application state.

It's probably evident that application designers need to very carefully
design their message handlers to create a blockchain that does anything
diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md
index 9d99ff65f35..08aa4fced45 100644
--- a/docs/networks/docker-compose.md
+++ b/docs/networks/docker-compose.md
@@ -8,7 +8,7 @@ With Docker Compose, you can spin up local testnets with a single command.

## Requirements

-1. [Install CometBFT](../introduction/install.md)
+1. [Install CometBFT](../guides/install.md)
2. [Install docker](https://docs.docker.com/engine/installation/)
3. [Install docker-compose](https://docs.docker.com/compose/install/)
@@ -64,7 +64,7 @@ To change the number of validators / non-validators change the `localnet-start`

```makefile
localnet-start: localnet-stop
  @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/cometbft:Z cometbft/localnode testnet --v 5 --n 3 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
-  docker-compose up
+  docker compose up -d
```

The command now will generate config files for 5 validators and 3
@@ -96,7 +96,7 @@ rm -rf ./build/node*

## Configuring ABCI containers

-To use your own ABCI applications with 4-node setup edit the [docker-compose.yaml](https://github.com/cometbft/cometbft/blob/main/docker-compose.yml) file and add images to your ABCI application.
+To use your own ABCI applications with the 4-node setup, edit the [docker-compose.yaml](https://github.com/cometbft/cometbft/blob/v0.38.x/docker-compose.yml) file and add images to your ABCI application.

```yml
  abci0:
@@ -145,7 +145,7 @@ To use your own ABCI applications with 4-node setup edit the [docker-compose.yam

```

-Override the [command](https://github.com/cometbft/cometbft/blob/main/networks/local/localnode/Dockerfile#L11) in each node to connect to it's ABCI.
+Override the [command](https://github.com/cometbft/cometbft/blob/v0.38.x/networks/local/localnode/Dockerfile#L11) in each node to connect to its ABCI.

```yml
  node0:
diff --git a/docs/qa/CometBFT-QA-37.md b/docs/qa/CometBFT-QA-37.md
index 1717ecf3ecd..1181cf5d827 100644
--- a/docs/qa/CometBFT-QA-37.md
+++ b/docs/qa/CometBFT-QA-37.md
@@ -19,7 +19,7 @@ As in other iterations of our QA process, we have used a 200-node network as tes

### Saturation point

As in previous iterations, in our QA experiments, the system is subjected to a load slightly under a saturation point.
-The method to identify the saturation point is explained [here](CometBFT-QA-34.md#finding-the-saturation-point) and its application to the baseline is described [here](TMCore-QA-37.md#finding-the-saturation-point).
+The method to identify the saturation point is explained [here](TMCore-QA-34.md#finding-the-saturation-point) and its application to the baseline is described [here](TMCore-QA-37.md#finding-the-saturation-point).
We use the same saturation point, that is, `c`, the number of connections created by the load runner process to the target node, is 2 and `r`, the rate or number of transactions issued per second, is 200.

## Examining latencies
diff --git a/docs/qa/CometBFT-QA-38.md b/docs/qa/CometBFT-QA-38.md
new file mode 100644
index 00000000000..b44b386c9ae
--- /dev/null
+++ b/docs/qa/CometBFT-QA-38.md
@@ -0,0 +1,556 @@
+---
+order: 1
+parent:
+  title: CometBFT QA Results v0.38.x
+  description: This is a report on the results obtained when running CometBFT v0.38.x on testnets
+  order: 5
+---
+
+# CometBFT QA Results v0.38.x
+
+This iteration of the QA was run on CometBFT `v0.38.0-alpha.2`, the second
+`v0.38.x` version from the CometBFT repository.
+
+The changes with respect to the baseline, `v0.37.0-alpha.3` from Feb 21, 2023,
+include the introduction of the `FinalizeBlock` method to complete the full
+range of ABCI++ functionality (ABCI 2.0), and several other improvements
+described in the
+[CHANGELOG](https://github.com/cometbft/cometbft/blob/v0.38.0-alpha.2/CHANGELOG.md).
+
+## Issues discovered
+
+* (critical, fixed) [\#539] and [\#546] - This bug caused the proposer to crash in
+  `PrepareProposal` because it did not have extensions when it should have.
+  This happened mainly when the proposer was catching up.
+* (critical, fixed) [\#562] - There were several bugs in the metrics-related
+  logic that were causing panics when the testnets were started.
+
+## 200 Node Testnet
+
+As in other iterations of our QA process, we have used a 200-node network as
+testbed, plus nodes to introduce load and collect metrics.
+
+### Saturation point
+
+As in previous iterations of our QA experiments, we first find the transaction
+load at which the system begins to show degraded performance. Then we run the
+experiments with the system subjected to a load slightly under the saturation
+point. The method to identify the saturation point is explained
+[here](CometBFT-QA-34.md#saturation-point) and its application to the baseline
+is described [here](TMCore-QA-37.md#finding-the-saturation-point).
+
+The following table summarizes the results for the different experiments
+(extracted from
+[`v038_report_tabbed.txt`](img38/200nodes/v038_report_tabbed.txt)). The X axis
+(`c`) is the number of connections created by the load runner process to the
+target node. The Y axis (`r`) is the rate or number of transactions issued per
+second.
+
+|        | c=1       | c=2       | c=4   |
+| ------ | --------: | --------: | ----: |
+| r=200  | 17800     | **33259** | 33259 |
+| r=400  | **35600** | 41565     | 41384 |
+| r=800  | 36831     | 38686     | 40816 |
+| r=1600 | 40600     | 45034     | 39830 |
+
+We can observe in the table that the system is saturated beyond the diagonal
+defined by the entries `c=1,r=400` and `c=2,r=200`. Entries on the diagonal carry
+the same transaction load, so we can consider them equivalent. For the
+chosen diagonal, the expected number of processed transactions is `1 * 400 tx/s * 89 s = 35600`.
+(Note that we use 89 out of 90 seconds of the experiment because the last transaction batch
+coincides with the end of the experiment and is thus not sent.) The experiments on the diagonal
+below expect double that number, that is, `1 * 800 tx/s * 89 s = 71200`, but the
+system is not able to process such a load, so it is saturated.
+
+Therefore, for the rest of these experiments, we chose `c=1,r=400` as the
+configuration.
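+
+For illustration only (this is not part of the QA tooling), the saturation
+check described above amounts to comparing the observed number of processed
+transactions against the expected `c * r * 89`. A minimal Go sketch, using
+figures from the table above:
+
+```go
+package main
+
+import "fmt"
+
+// expectedTxs is the number of transactions the load runner should deliver
+// in a 90 s run: the last batch coincides with the end of the experiment
+// and is not sent, so only 89 s of load count, over c connections at r tx/s.
+func expectedTxs(c, r int) int { return c * r * 89 }
+
+func main() {
+    // Observed totals taken from the table above.
+    fmt.Println(35600 >= expectedTxs(1, 400)) // c=1,r=400: true -> not saturated
+    fmt.Println(36831 >= expectedTxs(1, 800)) // c=1,r=800: false -> saturated
+}
+```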
+We could have chosen the equivalent `c=2,r=200`, which is the same as the one
+used in our baseline version, but for simplicity we decided to use the one with
+only one connection.
+
+Also note that, compared to the previous QA tests, we have tried to find the
+saturation point within a higher range of load values for the rate `r`. In
+particular, we ran tests with `r` equal to or above `200`, while in the previous
+tests `r` was `200` or lower. Notably, for our baseline version we didn't
+run the experiment on the configuration `c=1,r=400`.
+
+For comparison, this is the table with the baseline version, where the
+saturation point is beyond the diagonal defined by `r=200,c=2` and `r=100,c=4`.
+
+|       | c=1   | c=2       | c=4       |
+| ----- | ----: | --------: | --------: |
+| r=25  | 2225  | 4450      | 8900      |
+| r=50  | 4450  | 8900      | 17800     |
+| r=100 | 8900  | 17800     | **35600** |
+| r=200 | 17800 | **35600** | 38660     |
+
+### Latencies
+
+The following figure plots the latencies of the experiment carried out with the
+configuration `c=1,r=400`.
+
+![latency-1-400](img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png)
+
+For reference, the following figure shows the latencies of one of the
+experiments for `c=2,r=200` in the baseline.
+
+![latency-2-200-37](img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png)
+
+As can be seen, in most cases the latencies are very similar, and in some cases,
+the baseline has slightly higher latencies than the version under test. Thus,
+from this small experiment, we can say that the latencies measured on the two
+versions are equivalent, or at least that the version under test is not worse
+than the baseline.
+
+### Prometheus Metrics on the Chosen Experiment
+
+This section further examines key metrics extracted from Prometheus data
+regarding the chosen experiment with configuration `c=1,r=400`.
+
+#### Mempool Size
+
+The mempool size, a count of the number of transactions in the mempool, was
+shown to be stable and homogeneous at all full nodes. It did not exhibit any
+unconstrained growth. The plot below shows the evolution over time of the
+cumulative number of transactions inside all full nodes' mempools at a given
+time.
+
+![mempool-cumulative](img38/200nodes/mempool_size.png)
+
+The following picture shows the evolution of the average mempool size over all
+full nodes, which mostly oscillates between 1000 and 2500 outstanding
+transactions.
+
+![mempool-avg](img38/200nodes/avg_mempool_size.png)
+
+The peaks observed coincide with the moments when some nodes reached round 1 of
+consensus (see below).
+
+The behavior is similar to that observed in the baseline, presented next.
+
+![mempool-cumulative-baseline](img37/200nodes_cmt037/mempool_size.png)
+
+![mempool-avg-baseline](img37/200nodes_cmt037/avg_mempool_size.png)
+
+
+#### Peers
+
+The number of peers was stable at all nodes. It was higher for the seed nodes
+(around 140) than for the rest (between 20 and 70 for most nodes). The red
+dashed line denotes the average value.
+
+![peers](img38/200nodes/peers.png)
+
+Just as in the baseline, shown next, the fact that non-seed nodes reach more
+than 50 peers is due to [\#9548].
+
+![peers](img37/200nodes_cmt037/peers.png)
+
+
+#### Consensus Rounds per Height
+
+Most heights took just one round, that is, round 0, but some nodes needed to
+advance to round 1.
+
+![rounds](img38/200nodes/rounds.png)
+
+The following specific run of the baseline required some nodes to reach round 1.
+
+![rounds](img37/200nodes_cmt037/rounds.png)
+
+
+#### Blocks Produced per Minute, Transactions Processed per Minute
+
+The following plot shows the rate at which blocks were created, from the point
+of view of each node. That is, it shows when each node learned that a new block
+had been agreed upon.
+
+![heights](img38/200nodes/block_rate.png)
+
+For most of the time when load was being applied to the system, most of the
+nodes stayed around 20 blocks/minute.
+
+The spike to more than 100 blocks/minute is due to a slow node catching up.
+
+The baseline experienced similar behavior.
+
+![heights-baseline](img37/200nodes_cmt037/block_rate.png)
+
+The collective spike on the right of the graph marks the end of the load
+injection, when blocks become smaller (empty) and impose less strain on the
+network. This behavior is reflected in the following graph, which shows the
+number of transactions processed per minute.
+
+![total-txs](img38/200nodes/total_txs_rate.png)
+
+The following is the transaction processing rate of the baseline, which is
+similar to the above.
+
+![total-txs-baseline](img37/200nodes_cmt037/total_txs_rate.png)
+
+
+#### Memory Resident Set Size
+
+The following graph shows the Resident Set Size of all monitored processes, with
+a maximum memory usage of 1.6GB, slightly lower than the baseline shown after.
+
+![rss](img38/200nodes/memory.png)
+
+A similar behavior was shown in the baseline, with even slightly higher memory
+usage.
+
+![rss](img37/200nodes_cmt037/memory.png)
+
+The memory of all processes went down as the load was removed, showing no signs
+of unconstrained growth.
+
+
+#### CPU utilization
+
+##### Comparison to baseline
+
+The best metric from Prometheus to gauge CPU utilization in a Unix machine is
+`load1`, as it usually appears in the [output of
+`top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux).
+
+The load is contained below 5 on most nodes, as seen in the following graph.
+
+![load1](img38/200nodes/cpu.png)
+
+The baseline had similar behavior.
+
+![load1-baseline](img37/200nodes_cmt037/cpu.png)
+
+##### Impact of vote extension signature verification
+
+It is important to notice that the baseline (`v0.37.x`) does not implement vote extensions,
+whereas the version under test (`v0.38.0-alpha.2`) _does_ implement them, and they are
+configured to be activated from height 1.
+The e2e application used in these tests verifies all received vote extension signatures (up to 175)
+twice per height: upon `PrepareProposal` (for sanity) and upon `ProcessProposal` (to demonstrate how
+real applications can do it).
+
+The fact that there is no noticeable difference in the CPU utilization plots of
+the baseline and `v0.38.0-alpha.2` means that re-verifying up to 175 vote extension signatures twice
+(besides the initial verification done by CometBFT when receiving them from the network)
+has no performance impact on the current version of the system: the bottlenecks are elsewhere.
+Thus, we should focus on optimizing the parts of the system that cause the current
+bottlenecks, e.g., by reducing mempool gossip duplication, adopting a leaner proposal
+structure, and optimizing consensus gossip.
+
+### Test Results
+
+The comparison against the baseline results shows that both scenarios had similar
+numbers and are therefore equivalent.
+
+The conclusion of these tests is shown in the following table, along with the
+commit versions used in the experiments.
+
+| Scenario | Date       | Version                                                     | Result |
+| -------- | ---------- | ----------------------------------------------------------- | ------ |
+| 200-node | 2023-05-21 | v0.38.0-alpha.2 (1f524d12996204f8fd9d41aa5aca215f80f06f5e)   | Pass   |
+
+
+## Rotating Node Testnet
+
+We use `c=1,r=400` as load, which can be considered a safe workload, as it was close to (but below)
+the saturation point in the 200 node testnet. This testnet has fewer nodes (10 validators and 25 full nodes).
+
+Importantly, the baseline considered in this section is `v0.37.0-alpha.2` (Tendermint Core),
+which is **different** from the one used in the [previous section](method.md#200-node-testnet).
+The reason is that this testnet was not re-tested for `v0.37.0-alpha.3` (CometBFT),
+since it was not deemed necessary.
+
+Unlike in the baseline tests, the version of CometBFT used for these tests is _not_ affected by [\#9539],
+which was fixed right after the rotating testnet was run for `v0.37`.
+As a result, the load introduced in this iteration of the test is higher, as transactions do not get rejected.
+
+### Latencies
+
+The plot of all latencies can be seen here.
+
+![rotating-all-latencies](img38/rotating/rotating_latencies.png)
+
+This is similar to the baseline.
+
+![rotating-all-latencies](img37/200nodes_tm037/v037_rotating_latencies.png)
+
+The average increase of about 1 second with respect to the baseline is due to the higher
+transaction load produced (remember that the baseline was affected by [\#9539], whereby most transactions
+produced were rejected by `CheckTx`).
+
+### Prometheus Metrics
+
+The set of metrics shown here roughly matches those shown for the baseline (`v0.37`) for the same experiment.
+We also show the baseline results for comparison.
+
+#### Blocks and Transactions per minute
+
+The following plot shows the blocks produced per minute.
+
+![rotating-heights](img38/rotating/rotating_block_rate.png)
+
+This is similar to the baseline, shown below.
+
+![rotating-heights-bl](img37/rotating/rotating_block_rate.png)
+
+The following plot shows only the heights reported by ephemeral nodes, both when they were blocksyncing
+and when they were running consensus.
+The second plot is the baseline plot, for comparison. The baseline lacks the heights when the nodes were
+blocksyncing, as that metric was implemented afterwards.
+
+![rotating-heights-ephe](img38/rotating/rotating_eph_heights.png)
+
+![rotating-heights-ephe-bl](img37/rotating/rotating_eph_heights.png)
+
+We can see that heights follow a similar pattern in both plots: they grow in length as the experiment advances.
+
+The following plot shows the transactions processed per minute.
+
+![rotating-total-txs](img38/rotating/rotating_txs_rate.png)
+
+For comparison, this is the baseline plot.
+
+![rotating-total-txs-bl](img37/rotating/rotating_txs_rate.png)
+
+We can see the rate is much lower in the baseline plot.
+The reason is that the baseline was affected by [\#9539], whereby `CheckTx` rejected most transactions
+produced by the load runner.
+
+#### Peers
+
+The plot below shows the evolution of the number of peers throughout the experiment.
+
+![rotating-peers](img38/rotating/rotating_peers.png)
+
+This is the baseline plot, for comparison.
+
+![rotating-peers-bl](img37/rotating/rotating_peers.png)
+
+The plotted values and their evolution are comparable in both plots.
+
+For further details on these plots, see [this section](./TMCore-QA-34.md#peers-1).
+
+#### Memory Resident Set Size
+
+The average Resident Set Size (RSS) over all processes is notably bigger on `v0.38.0-alpha.2` than on the baseline.
+The reason for this is, again, the fact that `CheckTx` was rejecting most transactions submitted on the baseline,
+and therefore the overall transaction load was lower on the baseline.
+This is consistent with the difference seen in the transaction rate plots
+in the [previous section](#blocks-and-transactions-per-minute).
+
+![rotating-rss-avg](img38/rotating/rotating_avg_memory.png)
+
+![rotating-rss-avg-bl](img37/rotating/rotating_avg_memory.png)
+
+#### CPU utilization
+
+The plots show metric `load1` for all nodes for `v0.38.0-alpha.2` and for the baseline.
+
+![rotating-load1](img38/rotating/rotating_cpu.png)
+
+![rotating-load1-bl](img37/rotating/rotating_cpu.png)
+
+In both cases, it is contained under 5 most of the time, which is considered normal load.
+The load is on average somewhat higher on `v0.38.0-alpha.2` because of the larger
+number of transactions processed per minute as compared to the baseline.
+
+### Test Result
+
+| Scenario | Date       | Version                                                     | Result |
+| -------- | ---------- | ----------------------------------------------------------- | ------ |
+| Rotating | 2023-05-23 | v0.38.0-alpha.2 (e9abb116e29beb830cf111b824c8e2174d538838)   | Pass   |
+
+
+
+## Vote Extensions Testbed
+
+In this testnet we evaluate how varying the size of the vote extensions added to pre-commit votes affects the performance of CometBFT.
+The test uses the Key/Value store in our [end-to-end] test framework, which has the following simplified flow:
+
+1. When validators send their pre-commit votes for a block of height $i$, they first extend the vote as they see fit in `ExtendVote`.
+2. When a proposer for height $i+1$ creates a block to propose, in `PrepareProposal`, it prepends the transactions with a special transaction, which modifies a reserved key. The special transaction's value is derived from the vote extensions of height $i$; in this example, it includes the set of extensions itself, hex-encoded as a string.
+3. When a validator sends their pre-vote for the block proposed in $i+1$, they first double-check in `ProcessProposal` that the special transaction in the block was properly built by the proposer.
+4. When validators send their pre-commit for the block proposed in $i+1$, they first extend the vote, and the steps repeat for heights $i+2$ and so on.
+
+For this test, extensions are random sequences of bytes with a predefined `vote_extension_size`.
+Hence, two effects are seen on the network.
+First, pre-commit vote message sizes will increase by the specified `vote_extension_size` and, second, block messages will increase by twice `vote_extension_size` (given the hex encoding of extensions) times the number of extensions received, i.e., at least 2/3 of 175.
+
+All tests were performed on commit d5baba237ab3a04c1fd4a7b10927ba2e6a2aab27, which corresponds to v0.38.0-alpha.2 plus commits to add the ability to vary the vote extension sizes in the test application.
+Although the same commit is used for the baseline, in this configuration the behavior observed is the same as in the "vanilla" v0.38.0-alpha.2 test application, that is, vote extensions are 8-byte integers, compressed as variable-size integers, instead of a random sequence of `vote_extension_size` bytes.
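+
+As a rough, illustrative calculation: with the minimum of 117 extensions
+received (2/3 of 175, rounded up), the 32k case adds about
+`2 * 32768 * 117 ≈ 7.3 MiB` of hex-encoded extension data to each proposed
+block, on top of the transactions themselves.
+
+The following table summarizes the test cases.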
+
+| Name     | Extension Size (bytes) | Date       |
+| -------- | ---------------------- | ---------- |
+| baseline | 8 (varint)             | 2023-05-26 |
+| 2k       | 2048                   | 2023-05-29 |
+| 4k       | 4094                   | 2023-05-29 |
+| 8k       | 8192                   | 2023-05-26 |
+| 16k      | 16384                  | 2023-05-26 |
+| 32k      | 32768                  | 2023-05-26 |
+
+
+### Latency
+
+The following figures show the latencies observed on each of the 5 runs of each experiment;
+the red line shows the average of each run.
+It can easily be seen from these graphs that the larger the vote extension size, the more latency varies and the more common higher latencies become.
+Even in the case of extensions of size 2k, the mean latency goes from below 5s to nearly 10s.
+
+**Baseline**
+
+![](img38/voteExtensions/all_experiments_baseline.png)
+
+**2k**
+
+![](img38/voteExtensions/all_experiments_2k.png)
+
+**4k**
+
+![](img38/voteExtensions/all_experiments_4k.png)
+
+**8k**
+
+![](img38/voteExtensions/all_experiments_8k.png)
+
+**16k**
+
+![](img38/voteExtensions/all_experiments_16k.png)
+
+**32k**
+
+![](img38/voteExtensions/all_experiments_32k.png)
+
+The following graphs combine all the runs of the same experiment.
+They show that latency variation greatly increases with the size of the vote extensions.
+In particular, for the 16k and 32k cases, the system goes through large gaps without transaction delivery.
+As discussed later, this is the result of heights taking multiple rounds to finish and new transactions being held until the next block is agreed upon.
+
+|                                                             |                                                   |
+| ----------------------------------------------------------- | ------------------------------------------------- |
+| baseline ![](img38/voteExtensions/all_c1r400_baseline.png)  | 2k ![](img38/voteExtensions/all_c1r400_2k.png)    |
+| 4k ![](img38/voteExtensions/all_c1r400_4k.png)              | 8k ![](img38/voteExtensions/all_c1r400_8k.png)    |
+| 16k ![](img38/voteExtensions/all_c1r400_16k.png)            | 32k ![](img38/voteExtensions/all_c1r400_32k.png)  |
+
+
+### Blocks and Transactions per minute
+
+The following plots show the blocks produced per minute and transactions processed per minute.
+We have divided the presentation into an overview section, which shows the metrics for the whole experiment (five runs), and a detailed sample, which shows the metrics for the first of the five runs.
+We repeat the approach for the other metrics as well.
+The dashed red line shows the moving average over a 20s window.
+
+#### Overview
+
+It is clear from the overview plots that as the vote extension sizes increase, the rate of block creation decreases.
+Although the rate of transaction processing also decreases, it does not seem to decrease as fast.
+
+| Experiment   | Block creation rate                                          | Transaction rate                                               |
+| ------------ | ------------------------------------------------------------ | -------------------------------------------------------------- |
+| **baseline** | ![block rate](img38/voteExtensions/baseline_block_rate.png)  | ![txs rate](img38/voteExtensions/baseline_total_txs_rate.png)  |
+| **2k**       | ![block rate](img38/voteExtensions/02k_block_rate.png)       | ![txs rate](img38/voteExtensions/02k_total_txs_rate.png)       |
+| **4k**       | ![block rate](img38/voteExtensions/04k_block_rate.png)       | ![txs rate](img38/voteExtensions/04k_total_txs_rate.png)       |
+| **8k**       | ![block rate](img38/voteExtensions/8k_block_rate.png)        | ![txs rate](img38/voteExtensions/08k_total_txs_rate.png)       |
+| **16k**      | ![block rate](img38/voteExtensions/16k_block_rate.png)       | ![txs rate](img38/voteExtensions/16k_total_txs_rate.png)       |
+| **32k**      | ![block rate](img38/voteExtensions/32k_block_rate.png)       | ![txs rate](img38/voteExtensions/32k_total_txs_rate.png)       |
+
+#### First run
+
+| Experiment   | Block creation rate                                            | Transaction rate                                                 |
+| ------------ | --------------------------------------------------------------- | ----------------------------------------------------------------- |
+| **baseline** | ![block rate](img38/voteExtensions/baseline_1_block_rate.png)  | ![txs rate](img38/voteExtensions/baseline_1_total_txs_rate.png)  |
+| **2k**       | ![block rate](img38/voteExtensions/02k_1_block_rate.png)       | ![txs rate](img38/voteExtensions/02k_1_total_txs_rate.png)       |
+| **4k**       | ![block rate](img38/voteExtensions/04k_1_block_rate.png)       | ![txs rate](img38/voteExtensions/04k_1_total_txs_rate.png)       |
+| **8k**       | ![block rate](img38/voteExtensions/08k_1_block_rate.png)       | ![txs rate](img38/voteExtensions/08k_1_total_txs_rate.png)       |
+| **16k**      | ![block rate](img38/voteExtensions/16k_1_block_rate.png)       | ![txs rate](img38/voteExtensions/16k_1_total_txs_rate.png)       |
+| **32k**      | ![block rate](img38/voteExtensions/32k_1_block_rate.png)       | ![txs rate](img38/voteExtensions/32k_1_total_txs_rate.png)       |
+
+
+### Number of rounds
+
+The effect of vote extensions is also felt on the number of rounds needed to reach consensus.
+The following graphs show the highest round number required to reach consensus during the whole experiment.
+
+In the baseline and with small vote extension lengths, most blocks were agreed upon during round 0.
+As the load increased, more and more rounds were required.
+In the 32k case we see round 5 being reached frequently.
+
+| Experiment   | Number of Rounds per block                                     |
+| ------------ | --------------------------------------------------------------- |
+| **baseline** | ![number of rounds](img38/voteExtensions/baseline_rounds.png)  |
+| **2k**       | ![number of rounds](img38/voteExtensions/02k_rounds.png)       |
+| **4k**       | ![number of rounds](img38/voteExtensions/04k_rounds.png)       |
+| **8k**       | ![number of rounds](img38/voteExtensions/08k_rounds.png)       |
+| **16k**      | ![number of rounds](img38/voteExtensions/16k_rounds.png)       |
+| **32k**      | ![number of rounds](img38/voteExtensions/32k_rounds.png)       |
+
+
+We conjecture that the reason is that the timeouts used are inadequate for the extra traffic in the network.
+
+### CPU
+
+The CPU usage reached the same peaks on all tests, but the following graphs show that with larger vote extensions, nodes take longer to reduce their CPU usage.
+This could mean that a backlog of processing forms during the execution of the tests with larger extensions.
+
+
+| Experiment   | CPU                                                     |
+| ------------ | ------------------------------------------------------- |
+| **baseline** | ![cpu-avg](img38/voteExtensions/baseline_avg_cpu.png)   |
+| **2k**       | ![cpu-avg](img38/voteExtensions/02k_avg_cpu.png)        |
+| **4k**       | ![cpu-avg](img38/voteExtensions/04k_avg_cpu.png)        |
+| **8k**       | ![cpu-avg](img38/voteExtensions/08k_avg_cpu.png)        |
+| **16k**      | ![cpu-avg](img38/voteExtensions/16k_avg_cpu.png)        |
+| **32k**      | ![cpu-avg](img38/voteExtensions/32k_avg_cpu.png)        |
+
+### Resident Memory
+
+The same conclusion reached for CPU usage may be drawn for memory:
+a backlog of work forms during the tests, and catching up (freeing memory) happens after the test is done.
+
+A more worrying trend is that the bottom of the memory usage seems to increase between runs.
+However, we investigated this in longer runs and confirmed that there is no such trend.
+
+
+
+| Experiment   | Resident Set Size                                          |
+| ------------ | ---------------------------------------------------------- |
+| **baseline** | ![rss-avg](img38/voteExtensions/baseline_avg_memory.png)  |
+| **2k**       | ![rss-avg](img38/voteExtensions/02k_avg_memory.png)       |
+| **4k**       | ![rss-avg](img38/voteExtensions/04k_avg_memory.png)       |
+| **8k**       | ![rss-avg](img38/voteExtensions/08k_avg_memory.png)       |
+| **16k**      | ![rss-avg](img38/voteExtensions/16k_avg_memory.png)       |
+| **32k**      | ![rss-avg](img38/voteExtensions/32k_avg_memory.png)       |
+
+### Mempool size
+
+This metric shows how many transactions are outstanding in the nodes' mempools.
+Observe that in all experiments, the average number of transactions in the mempool quickly drops to near zero between runs.
+
+
+| Experiment   | Average Mempool Size                                                 |
+| ------------ | -------------------------------------------------------------------- |
+| **baseline** | ![mempool-avg](img38/voteExtensions/baseline_avg_mempool_size.png)  |
+| **2k**       | ![mempool-avg](img38/voteExtensions/02k_avg_mempool_size.png)       |
+| **4k**       | ![mempool-avg](img38/voteExtensions/04k_avg_mempool_size.png)       |
+| **8k**       | ![mempool-avg](img38/voteExtensions/08k_avg_mempool_size.png)       |
+| **16k**      | ![mempool-avg](img38/voteExtensions/16k_avg_mempool_size.png)       |
+| **32k**      | ![mempool-avg](img38/voteExtensions/32k_avg_mempool_size.png)       |
+
+
+
+
+
+### Results
+
+| Scenario | Date       | Version                                                                                 | Result |
+| -------- | ---------- | --------------------------------------------------------------------------------------- | ------ |
+| VESize   | 2023-05-23 | v0.38.0-alpha.2 + varying vote extensions (9fc711b6514f99b2dc0864fc703cb81214f01783)    | N/A    |
+
+
+
+[\#9539]: https://github.com/tendermint/tendermint/issues/9539
+[\#9548]: https://github.com/tendermint/tendermint/issues/9548
+[\#539]: https://github.com/cometbft/cometbft/issues/539
+[\#546]: https://github.com/cometbft/cometbft/issues/546
+[\#562]: https://github.com/cometbft/cometbft/issues/562
+[end-to-end]: https://github.com/cometbft/cometbft/tree/main/test/e2e
diff --git a/docs/qa/README.md b/docs/qa/README.md
index d59049074bc..a9b678f819a 100644
--- a/docs/qa/README.md
+++ b/docs/qa/README.md
@@ -3,7 +3,7 @@ order: 1
parent:
  title: CometBFT Quality Assurance
  description: This is a report on the process followed and results obtained when running v0.34.x on testnets
-  order: 2
+  order: 6
---

# CometBFT Quality Assurance
@@ -23,3 +23,4 @@ The following releases have undergone the Quality Assurance process, and the cor

* [v0.34.x](CometBFT-QA-34.md) - Tested prior to releasing v0.34.27, using TM v0.34.x results as baseline.
* [TM v0.37.x](TMCore-QA-37.md) - Tested prior to releasing TM v0.37.x, using TM v0.34.x results as baseline. * [v0.37.x](CometBFT-QA-37.md) - Tested on CometBFT v0.37.0-alpha3, using TM v0.37.x results as baseline. +* [v0.38.x](CometBFT-QA-38.md) - Tested on v0.38.0-alpha.2, using v0.37.x results as baseline. diff --git a/docs/qa/TMCore-QA-37.md b/docs/qa/TMCore-QA-37.md index edff57b0276..23dd2ed1f03 100644 --- a/docs/qa/TMCore-QA-37.md +++ b/docs/qa/TMCore-QA-37.md @@ -32,7 +32,7 @@ During this iteration of the QA process, the following issues were found: ### Finding the Saturation Point The first goal is to identify the saturation point and compare it with the baseline (v0.34.x). -For further details, see [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) +For further details, see [this paragraph](TMCore-QA-34.md#finding-the-saturation-point) in the baseline version. The following table summarizes the results for v0.37.x, for the different experiments @@ -63,7 +63,7 @@ The saturation point is beyond the diagonal: * `r=100,c=4` which is at the same place as the baseline. For more details on the saturation point, see -[this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) in the baseline version. +[this paragraph](TMCore-QA-34.md#finding-the-saturation-point) in the baseline version. The experiment chosen to examine Prometheus metrics is the same as in the baseline: **`r=200,c=2`**. @@ -211,7 +211,7 @@ Version: 1cf9d8e276afe8595cba960b51cd056514965fd1 We use the same load as in the baseline: `c=4,r=800`. Just as in the baseline tests, the version of CometBFT used for these tests is affected by #9539. -See this paragraph in the [baseline report](CometBFT-QA-34.md#rotating-node-testnet) for further details. +See this paragraph in the [baseline report](method.md#rotating-node-testnet) for further details. Finally, note that this setup allows for a fairer comparison between this version and the baseline. 
### Latencies diff --git a/docs/qa/img37/rotating/rotating_avg_memory.png b/docs/qa/img37/rotating/rotating_avg_memory.png new file mode 100644 index 00000000000..7feb2e81261 Binary files /dev/null and b/docs/qa/img37/rotating/rotating_avg_memory.png differ diff --git a/docs/qa/img37/rotating/rotating_block_rate.png b/docs/qa/img37/rotating/rotating_block_rate.png new file mode 100644 index 00000000000..4bbc3c99941 Binary files /dev/null and b/docs/qa/img37/rotating/rotating_block_rate.png differ diff --git a/docs/qa/img37/rotating/rotating_cpu.png b/docs/qa/img37/rotating/rotating_cpu.png new file mode 100644 index 00000000000..ef4c7d30df2 Binary files /dev/null and b/docs/qa/img37/rotating/rotating_cpu.png differ diff --git a/docs/qa/img37/rotating/rotating_eph_heights.png b/docs/qa/img37/rotating/rotating_eph_heights.png new file mode 100644 index 00000000000..36850fb526c Binary files /dev/null and b/docs/qa/img37/rotating/rotating_eph_heights.png differ diff --git a/docs/qa/img37/rotating/rotating_peers.png b/docs/qa/img37/rotating/rotating_peers.png new file mode 100644 index 00000000000..c45a4d4d73b Binary files /dev/null and b/docs/qa/img37/rotating/rotating_peers.png differ diff --git a/docs/qa/img37/rotating/rotating_txs_rate.png b/docs/qa/img37/rotating/rotating_txs_rate.png new file mode 100644 index 00000000000..5462738e5b9 Binary files /dev/null and b/docs/qa/img37/rotating/rotating_txs_rate.png differ diff --git a/docs/qa/img38/200nodes/avg_mempool_size.png b/docs/qa/img38/200nodes/avg_mempool_size.png new file mode 100644 index 00000000000..36cd5f6c705 Binary files /dev/null and b/docs/qa/img38/200nodes/avg_mempool_size.png differ diff --git a/docs/qa/img38/200nodes/block_rate.png b/docs/qa/img38/200nodes/block_rate.png new file mode 100644 index 00000000000..b2042865d5e Binary files /dev/null and b/docs/qa/img38/200nodes/block_rate.png differ diff --git a/docs/qa/img38/200nodes/c1r400.png b/docs/qa/img38/200nodes/c1r400.png new file mode 100644 index 00000000000..0c27c144f84 Binary files /dev/null and b/docs/qa/img38/200nodes/c1r400.png differ diff --git a/docs/qa/img38/200nodes/cpu.png b/docs/qa/img38/200nodes/cpu.png new file mode 100644 index 00000000000..15f74aeb041 Binary files /dev/null and b/docs/qa/img38/200nodes/cpu.png differ diff --git a/docs/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png b/docs/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png new file mode 100644 index 00000000000..21f5cab6ea1 Binary files /dev/null and b/docs/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png differ diff --git a/docs/qa/img38/200nodes/memory.png b/docs/qa/img38/200nodes/memory.png new file mode 100644 index 00000000000..3e01c4ccd0b Binary files /dev/null and b/docs/qa/img38/200nodes/memory.png differ diff --git a/docs/qa/img38/200nodes/mempool_size.png b/docs/qa/img38/200nodes/mempool_size.png new file mode 100644 index 00000000000..6a897a84a15 Binary files /dev/null and b/docs/qa/img38/200nodes/mempool_size.png differ diff --git a/docs/qa/img38/200nodes/peers.png b/docs/qa/img38/200nodes/peers.png new file mode 100644 index 00000000000..87470051950 Binary files /dev/null and b/docs/qa/img38/200nodes/peers.png differ diff --git a/docs/qa/img38/200nodes/rounds.png b/docs/qa/img38/200nodes/rounds.png new file mode 100644 index 00000000000..5ac53cad869 Binary files /dev/null and b/docs/qa/img38/200nodes/rounds.png differ diff --git a/docs/qa/img38/200nodes/total_txs_rate.png b/docs/qa/img38/200nodes/total_txs_rate.png new file mode 100644 index 
00000000000..ac7f5e686b9 Binary files /dev/null and b/docs/qa/img38/200nodes/total_txs_rate.png differ diff --git a/docs/qa/img38/200nodes/v038_report_tabbed.txt b/docs/qa/img38/200nodes/v038_report_tabbed.txt new file mode 100644 index 00000000000..c482aeac84c --- /dev/null +++ b/docs/qa/img38/200nodes/v038_report_tabbed.txt @@ -0,0 +1,40 @@ +Experiment ID: 93024f38-a008-443d-9aa7-9ac44c9fe15b Experiment ID: d65a486e-4712-41b5-9f41-97e491895d2e Experiment ID: 9c39184b-b8c7-46a2-bacb-40f9961fb7a1 + Connections: 1 Connections: 2 Connections: 4 + Rate: 200 Rate: 200 Rate: 200 + Size: 1024 Size: 1024 Size: 1024 + Total Valid Tx: 17800 Total Valid Tx: 33259 Total Valid Tx: 33259 + Total Negative Latencies: 0 Total Negative Latencies: 0 Total Negative Latencies: 0 + Minimum Latency: 562.805076ms Minimum Latency: 894.026089ms Minimum Latency: 2.166875257s + Maximum Latency: 7.623963559s Maximum Latency: 16.941216187s Maximum Latency: 15.701598288s + Average Latency: 1.860012628s Average Latency: 4.033134276s Average Latency: 7.592412668s + Standard Deviation: 1.169158915s Standard Deviation: 3.427243686s Standard Deviation: 2.951797195s +Experiment ID: de676ecf-038e-443f-a26a-27915f29e312 Experiment ID: 39d571b8-f39b-4aec-bd6a-e94f28a42a63 Experiment ID: 5b855105-60b5-4c2d-ba5c-fdad0213765c + Connections: 1 Connections: 2 Connections: 4 + Rate: 400 Rate: 400 Rate: 400 + Size: 1024 Size: 1024 Size: 1024 + Total Valid Tx: 35600 Total Valid Tx: 41565 Total Valid Tx: 41384 + Total Negative Latencies: 0 Total Negative Latencies: 0 Total Negative Latencies: 0 + Minimum Latency: 565.640641ms Minimum Latency: 1.650712046s Minimum Latency: 2.796290248s + Maximum Latency: 10.051316705s Maximum Latency: 15.897581951s Maximum Latency: 20.124431723s + Average Latency: 3.499369173s Average Latency: 8.635543807s Average Latency: 10.596146863s + Standard Deviation: 1.926805844s Standard Deviation: 2.535678364s Standard Deviation: 3.193742233s +Experiment ID: db10ca9e-6cf8-4dc9-9284-6e767e4b4346 Experiment ID: f57af87d-d342-41f7-a0eb-baa87a4b2257 Experiment ID: 32819ea0-1a59-41de-8aa6-b70f68697520 + Connections: 1 Connections: 2 Connections: 4 + Rate: 800 Rate: 800 Rate: 800 + Size: 1024 Size: 1024 Size: 1024 + Total Valid Tx: 36831 Total Valid Tx: 38686 Total Valid Tx: 40816 + Total Negative Latencies: 0 Total Negative Latencies: 0 Total Negative Latencies: 0 + Minimum Latency: 1.203966853s Minimum Latency: 728.863446ms Minimum Latency: 1.559342549s + Maximum Latency: 21.411365818s Maximum Latency: 24.349050642s Maximum Latency: 25.791215028s + Average Latency: 9.213156739s Average Latency: 11.194994374s Average Latency: 11.950851892s + Standard Deviation: 4.909584729s Standard Deviation: 5.199186587s Standard Deviation: 4.315394253s +Experiment ID: 587762c4-3fd4-4799-9f3b-9e6971b353ba Experiment ID: 489b2623-a3e4-453f-a771-5d05e7de4a1f Experiment ID: 98605df2-3b16-46db-8675-2980bc84ea2b + Connections: 1 Connections: 2 Connections: 4 + Rate: 1600 Rate: 1600 Rate: 1600 + Size: 1024 Size: 1024 Size: 1024 + Total Valid Tx: 40600 Total Valid Tx: 45034 Total Valid Tx: 39830 + Total Negative Latencies: 0 Total Negative Latencies: 0 Total Negative Latencies: 0 + Minimum Latency: 998.07523ms Minimum Latency: 1.43819209s Minimum Latency: 1.50664776s + Maximum Latency: 18.565312759s Maximum Latency: 17.098811297s Maximum Latency: 20.346885373s + Average Latency: 8.78128586s Average Latency: 8.957419021s Average Latency: 12.113245591s + Standard Deviation: 3.305897473s Standard Deviation: 2.734640455s Standard Deviation: 
4.029854219s diff --git a/docs/qa/img38/rotating/rotating_avg_memory.png b/docs/qa/img38/rotating/rotating_avg_memory.png new file mode 100644 index 00000000000..43dadcabea0 Binary files /dev/null and b/docs/qa/img38/rotating/rotating_avg_memory.png differ diff --git a/docs/qa/img38/rotating/rotating_block_rate.png b/docs/qa/img38/rotating/rotating_block_rate.png new file mode 100644 index 00000000000..e627064a345 Binary files /dev/null and b/docs/qa/img38/rotating/rotating_block_rate.png differ diff --git a/docs/qa/img38/rotating/rotating_cpu.png b/docs/qa/img38/rotating/rotating_cpu.png new file mode 100644 index 00000000000..f51403d409b Binary files /dev/null and b/docs/qa/img38/rotating/rotating_cpu.png differ diff --git a/docs/qa/img38/rotating/rotating_eph_heights.png b/docs/qa/img38/rotating/rotating_eph_heights.png new file mode 100644 index 00000000000..6c8e08eeee8 Binary files /dev/null and b/docs/qa/img38/rotating/rotating_eph_heights.png differ diff --git a/docs/qa/img38/rotating/rotating_latencies.png b/docs/qa/img38/rotating/rotating_latencies.png new file mode 100644 index 00000000000..8031d7cda80 Binary files /dev/null and b/docs/qa/img38/rotating/rotating_latencies.png differ diff --git a/docs/qa/img38/rotating/rotating_peers.png b/docs/qa/img38/rotating/rotating_peers.png new file mode 100644 index 00000000000..b0ac6a2b08d Binary files /dev/null and b/docs/qa/img38/rotating/rotating_peers.png differ diff --git a/docs/qa/img38/rotating/rotating_txs_rate.png b/docs/qa/img38/rotating/rotating_txs_rate.png new file mode 100644 index 00000000000..d3ae71413ac Binary files /dev/null and b/docs/qa/img38/rotating/rotating_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/02k_1_block_rate.png b/docs/qa/img38/voteExtensions/02k_1_block_rate.png new file mode 100644 index 00000000000..ff314bd7ba5 Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/02k_1_total_txs_rate.png b/docs/qa/img38/voteExtensions/02k_1_total_txs_rate.png new file mode 100644 index 00000000000..67a0cbb582b Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/02k_avg_cpu.png b/docs/qa/img38/voteExtensions/02k_avg_cpu.png new file mode 100644 index 00000000000..2eb9683ec4c Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/02k_avg_memory.png b/docs/qa/img38/voteExtensions/02k_avg_memory.png new file mode 100644 index 00000000000..955d1136294 Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/02k_avg_mempool_size.png b/docs/qa/img38/voteExtensions/02k_avg_mempool_size.png new file mode 100644 index 00000000000..426867db67b Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/02k_block_rate.png b/docs/qa/img38/voteExtensions/02k_block_rate.png new file mode 100644 index 00000000000..0a53ccf7886 Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/02k_rounds.png b/docs/qa/img38/voteExtensions/02k_rounds.png new file mode 100644 index 00000000000..b932e80d423 Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/02k_total_txs_rate.png b/docs/qa/img38/voteExtensions/02k_total_txs_rate.png new 
file mode 100644 index 00000000000..c7040fb86b8 Binary files /dev/null and b/docs/qa/img38/voteExtensions/02k_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/04k_1_block_rate.png b/docs/qa/img38/voteExtensions/04k_1_block_rate.png new file mode 100644 index 00000000000..9f9564a6fd9 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/04k_1_total_txs_rate.png b/docs/qa/img38/voteExtensions/04k_1_total_txs_rate.png new file mode 100644 index 00000000000..e69096d7e47 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/04k_avg_cpu.png b/docs/qa/img38/voteExtensions/04k_avg_cpu.png new file mode 100644 index 00000000000..be251929236 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/04k_avg_memory.png b/docs/qa/img38/voteExtensions/04k_avg_memory.png new file mode 100644 index 00000000000..50503a3abc0 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/04k_avg_mempool_size.png b/docs/qa/img38/voteExtensions/04k_avg_mempool_size.png new file mode 100644 index 00000000000..3e3eea8ed55 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/04k_block_rate.png b/docs/qa/img38/voteExtensions/04k_block_rate.png new file mode 100644 index 00000000000..f0bd5c2a1ba Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/04k_rounds.png b/docs/qa/img38/voteExtensions/04k_rounds.png new file mode 100644 index 00000000000..64bb878f767 Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/04k_total_txs_rate.png b/docs/qa/img38/voteExtensions/04k_total_txs_rate.png new file mode 100644 index 00000000000..be5ab70aefe Binary files /dev/null and b/docs/qa/img38/voteExtensions/04k_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/08k_1_avg_mempool_size.png b/docs/qa/img38/voteExtensions/08k_1_avg_mempool_size.png new file mode 100644 index 00000000000..00cc236c365 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_1_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/08k_1_block_rate.png b/docs/qa/img38/voteExtensions/08k_1_block_rate.png new file mode 100644 index 00000000000..9caa120f734 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/08k_1_rounds.png b/docs/qa/img38/voteExtensions/08k_1_rounds.png new file mode 100644 index 00000000000..809a97eed94 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_1_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/08k_1_total_txs_rate.png b/docs/qa/img38/voteExtensions/08k_1_total_txs_rate.png new file mode 100644 index 00000000000..ce1c5c7d8cb Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/08k_avg_cpu.png b/docs/qa/img38/voteExtensions/08k_avg_cpu.png new file mode 100644 index 00000000000..c78af4f2960 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/08k_avg_memory.png b/docs/qa/img38/voteExtensions/08k_avg_memory.png new file mode 100644 index 
00000000000..cd36d056262 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/08k_avg_mempool_size.png b/docs/qa/img38/voteExtensions/08k_avg_mempool_size.png new file mode 100644 index 00000000000..dd852e9bb8f Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/08k_rounds.png b/docs/qa/img38/voteExtensions/08k_rounds.png new file mode 100644 index 00000000000..0bd983039d6 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/08k_total_txs_rate.png b/docs/qa/img38/voteExtensions/08k_total_txs_rate.png new file mode 100644 index 00000000000..87cb6e4bad9 Binary files /dev/null and b/docs/qa/img38/voteExtensions/08k_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/16k_1_avg_mempool_size.png b/docs/qa/img38/voteExtensions/16k_1_avg_mempool_size.png new file mode 100644 index 00000000000..3eb5b73d3f1 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_1_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/16k_1_block_rate.png b/docs/qa/img38/voteExtensions/16k_1_block_rate.png new file mode 100644 index 00000000000..12025af8c8f Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/16k_1_rounds.png b/docs/qa/img38/voteExtensions/16k_1_rounds.png new file mode 100644 index 00000000000..15d6feb220d Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_1_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/16k_1_total_txs_rate.png b/docs/qa/img38/voteExtensions/16k_1_total_txs_rate.png new file mode 100644 index 00000000000..65cb0c11554 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/16k_avg_cpu.png b/docs/qa/img38/voteExtensions/16k_avg_cpu.png new file mode 100644 index 00000000000..6fff44dab93 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/16k_avg_memory.png b/docs/qa/img38/voteExtensions/16k_avg_memory.png new file mode 100644 index 00000000000..218ef0bd6d1 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/16k_avg_mempool_size.png b/docs/qa/img38/voteExtensions/16k_avg_mempool_size.png new file mode 100644 index 00000000000..73881a1533a Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/16k_block_rate.png b/docs/qa/img38/voteExtensions/16k_block_rate.png new file mode 100644 index 00000000000..73cbba282d3 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/16k_rounds.png b/docs/qa/img38/voteExtensions/16k_rounds.png new file mode 100644 index 00000000000..7458188b7a1 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/16k_total_txs_rate.png b/docs/qa/img38/voteExtensions/16k_total_txs_rate.png new file mode 100644 index 00000000000..5d44a422dc4 Binary files /dev/null and b/docs/qa/img38/voteExtensions/16k_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/32k_1_avg_mempool_size.png b/docs/qa/img38/voteExtensions/32k_1_avg_mempool_size.png new file mode 100644 index 
00000000000..273f1b8b818 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_1_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/32k_1_block_rate.png b/docs/qa/img38/voteExtensions/32k_1_block_rate.png new file mode 100644 index 00000000000..d469e947555 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/32k_1_rounds.png b/docs/qa/img38/voteExtensions/32k_1_rounds.png new file mode 100644 index 00000000000..347263dd89a Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_1_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/32k_1_total_txs_rate.png b/docs/qa/img38/voteExtensions/32k_1_total_txs_rate.png new file mode 100644 index 00000000000..0d0451acdec Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/32k_avg_cpu.png b/docs/qa/img38/voteExtensions/32k_avg_cpu.png new file mode 100644 index 00000000000..5464681c2fc Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/32k_avg_memory.png b/docs/qa/img38/voteExtensions/32k_avg_memory.png new file mode 100644 index 00000000000..4cea5df70f0 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/32k_avg_mempool_size.png b/docs/qa/img38/voteExtensions/32k_avg_mempool_size.png new file mode 100644 index 00000000000..e573eca2ae4 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/32k_block_rate.png b/docs/qa/img38/voteExtensions/32k_block_rate.png new file mode 100644 index 00000000000..f3ebf625537 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/32k_rounds.png b/docs/qa/img38/voteExtensions/32k_rounds.png new file mode 100644 index 00000000000..a7a0597c24e Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/32k_total_txs_rate.png b/docs/qa/img38/voteExtensions/32k_total_txs_rate.png new file mode 100644 index 00000000000..fdd93e25277 Binary files /dev/null and b/docs/qa/img38/voteExtensions/32k_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/8k_block_rate.png b/docs/qa/img38/voteExtensions/8k_block_rate.png new file mode 100644 index 00000000000..da0a378adaf Binary files /dev/null and b/docs/qa/img38/voteExtensions/8k_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_16k.png b/docs/qa/img38/voteExtensions/all_c1r400_16k.png new file mode 100644 index 00000000000..d7e18134faf Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_16k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_2k.png b/docs/qa/img38/voteExtensions/all_c1r400_2k.png new file mode 100644 index 00000000000..9682d84a23f Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_2k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_32k.png b/docs/qa/img38/voteExtensions/all_c1r400_32k.png new file mode 100644 index 00000000000..5179fd21218 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_32k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_4k.png b/docs/qa/img38/voteExtensions/all_c1r400_4k.png new file mode 100644 index 00000000000..46d67719cb0 Binary files /dev/null and 
b/docs/qa/img38/voteExtensions/all_c1r400_4k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_64k.png b/docs/qa/img38/voteExtensions/all_c1r400_64k.png new file mode 100644 index 00000000000..04ff7e76096 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_64k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_8k.png b/docs/qa/img38/voteExtensions/all_c1r400_8k.png new file mode 100644 index 00000000000..b54ed7d892a Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_8k.png differ diff --git a/docs/qa/img38/voteExtensions/all_c1r400_baseline.png b/docs/qa/img38/voteExtensions/all_c1r400_baseline.png new file mode 100644 index 00000000000..2c094f58fd7 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_c1r400_baseline.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_16k.png b/docs/qa/img38/voteExtensions/all_experiments_16k.png new file mode 100644 index 00000000000..c7a1f6da203 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_16k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_2k.png b/docs/qa/img38/voteExtensions/all_experiments_2k.png new file mode 100644 index 00000000000..ea717d55962 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_2k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_32k.png b/docs/qa/img38/voteExtensions/all_experiments_32k.png new file mode 100644 index 00000000000..ee4cf339d23 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_32k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_4k.png b/docs/qa/img38/voteExtensions/all_experiments_4k.png new file mode 100644 index 00000000000..eb4c569cba7 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_4k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_64k.png b/docs/qa/img38/voteExtensions/all_experiments_64k.png new file mode 100644 index 00000000000..f7abbae58e0 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_64k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_8k.png b/docs/qa/img38/voteExtensions/all_experiments_8k.png new file mode 100644 index 00000000000..cbaaf5c9eb2 Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_8k.png differ diff --git a/docs/qa/img38/voteExtensions/all_experiments_baseline.png b/docs/qa/img38/voteExtensions/all_experiments_baseline.png new file mode 100644 index 00000000000..b27ec5d625c Binary files /dev/null and b/docs/qa/img38/voteExtensions/all_experiments_baseline.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png b/docs/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png new file mode 100644 index 00000000000..63c86687b13 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_1_block_rate.png b/docs/qa/img38/voteExtensions/baseline_1_block_rate.png new file mode 100644 index 00000000000..46f0a4ee8ac Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_1_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_1_rounds.png b/docs/qa/img38/voteExtensions/baseline_1_rounds.png new file mode 100644 index 00000000000..1e6db5e3838 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_1_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_1_total_txs_rate.png 
b/docs/qa/img38/voteExtensions/baseline_1_total_txs_rate.png new file mode 100644 index 00000000000..75f9ab435ce Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_1_total_txs_rate.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_avg_cpu.png b/docs/qa/img38/voteExtensions/baseline_avg_cpu.png new file mode 100644 index 00000000000..2c1bca8bf06 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_avg_cpu.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_avg_memory.png b/docs/qa/img38/voteExtensions/baseline_avg_memory.png new file mode 100644 index 00000000000..f0529880b4c Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_avg_memory.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_avg_mempool_size.png b/docs/qa/img38/voteExtensions/baseline_avg_mempool_size.png new file mode 100644 index 00000000000..179693cc610 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_avg_mempool_size.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_block_rate.png b/docs/qa/img38/voteExtensions/baseline_block_rate.png new file mode 100644 index 00000000000..20073522c84 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_block_rate.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_rounds.png b/docs/qa/img38/voteExtensions/baseline_rounds.png new file mode 100644 index 00000000000..468d4e2ff8e Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_rounds.png differ diff --git a/docs/qa/img38/voteExtensions/baseline_total_txs_rate.png b/docs/qa/img38/voteExtensions/baseline_total_txs_rate.png new file mode 100644 index 00000000000..306793d5d21 Binary files /dev/null and b/docs/qa/img38/voteExtensions/baseline_total_txs_rate.png differ diff --git a/docs/qa/method.md b/docs/qa/method.md index 6de0cbcf80c..a1663f65a63 100644 --- a/docs/qa/method.md +++ b/docs/qa/method.md @@ -17,7 +17,7 @@ This baseline is then compared with results obtained in later versions. Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them: _200 Node Test_, and _Rotating Nodes Test_. -[releases]: https://github.com/cometbft/cometbft/blob/main/RELEASES.md#large-scale-testnets +[releases]: https://github.com/cometbft/cometbft/blob/v0.38.x/RELEASES.md#large-scale-testnets ## Software Dependencies @@ -26,7 +26,7 @@ _200 Node Test_, and _Rotating Nodes Test_. * An account at Digital Ocean (DO), with a high droplet limit (>202) * The machine to orchestrate the tests should have the following installed: * A clone of the [testnet repository][testnet-repo] - * This repository contains all the scripts mentioned in the reminder of this section + * This repository contains all the scripts mentioned in the remainder of this section * [Digital Ocean CLI][doctl] * [Terraform CLI][Terraform] * [Ansible CLI][Ansible] @@ -38,10 +38,10 @@ _200 Node Test_, and _Rotating Nodes Test_. ### Requirements for Result Extraction -* Matlab or Octave -* [Prometheus][prometheus] server installed +* [Prometheus DB][prometheus] to collect metrics from nodes +* Prometheus DB to process queries (may be a different node from the previous one) * blockstore DB of one of the full nodes in the testnet -* Prometheus DB + [prometheus]: https://prometheus.io/ @@ -57,24 +57,43 @@ This section explains how the tests were carried out for reproducibility purpose 3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested.
* If you are running the base test, which implies a homogeneous network (all nodes are running the same version), then make sure makefile variable `VERSION2_WEIGHT` is set to 0 - * If you are running a mixed network, set the variable `VERSION_TAG2` to the other version you want deployed - in the network. The, adjust the weight variables `VERSION_WEIGHT` and `VERSION2_WEIGHT` to configure the + * If you are running a mixed network, set the variable `VERSION2_TAG` to the other version you want deployed + in the network. + Then adjust the weight variables `VERSION_WEIGHT` and `VERSION2_WEIGHT` to configure the desired proportion of nodes running each of the two configured versions. 4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9) -5. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `COMETBFT_CONSENSUS_HEIGHT` metric. - All nodes should be increasing their heights. -6. You now need to start the load runner that will produce transaction load +5. As a sanity check, connect to the Prometheus node's web interface (port 9090) + and check the graph for the `cometbft_consensus_height` metric. All nodes + should be increasing their heights. + + * You can find the Prometheus node's IP address in `ansible/hosts` under section `[prometheus]`. + * The following URL will display the metrics `cometbft_consensus_height` and `cometbft_mempool_size` (a script-friendly version of this check is sketched after this step list): + + ``` + http://<PROMETHEUS-NODE-IP>:9090/classic/graph?g0.range_input=1h&g0.expr=cometbft_consensus_height&g0.tab=0&g1.range_input=1h&g1.expr=cometbft_mempool_size&g1.tab=0 + ``` + +6. You now need to start the load runner that will produce transaction load. * If you don't know the saturation load of the version you are testing, you need to discover it. - * `ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node. - * Before running it, you need to edit the script to provide the IP address of a full node. - This node will receive all transactions from the load runner node. - * This script will take about 40 mins to run. - * It is running 90-seconds-long experiments in a loop with different loads. + * Run `make loadrunners-init`. This will copy the loader scripts to the + `testnet-load-runner` node and install the load tool. + * Find the IP address of the `testnet-load-runner` node in + `ansible/hosts` under section `[loadrunners]`. + * `ssh` into `testnet-load-runner`. + * Edit the script `/root/200-node-loadscript.sh` in the load runner + node to provide the IP address of a full node (for example, + `validator000`). This node will receive all transactions from the + load runner node. + * Run `/root/200-node-loadscript.sh` from the load runner node. + * This script will take about 40 mins to run, so it is suggested to + first run `tmux` in case the ssh session breaks. + * It is running 90-seconds-long experiments in a loop with different + loads. * If you already know the saturation load, you can simply run the test (several times) for 90 seconds with a load somewhat below saturation: - * set makefile variables `ROTATE_CONNECTIONS`, `ROTATE_TX_RATE`, to values that will produce the desired transaction load. - * set `ROTATE_TOTAL_TIME` to 90 (seconds). + * set makefile variables `LOAD_CONNECTIONS` and `LOAD_TX_RATE` to values that will produce the desired transaction load. + * set `LOAD_TOTAL_TIME` to 90 (seconds).
* run "make runload" and wait for it to complete. You may want to run this several times so the data from different runs can be compared. 7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine * Alternatively, you may want to run `make retrieve-prometheus-data` and `make retrieve-blockstore` separately. @@ -99,108 +118,46 @@ The CometBFT team should improve it at every iteration to increase the amount of #### Steps 1. Unzip the blockstore into a directory -2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore - * ```bash - mkdir results - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@f1aaa436d --database-type goleveldb --data-dir ./ > results/report.txt` - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@f1aaa436d --database-type goleveldb --data-dir ./ --csv results/raw.csv` - ``` -3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate - * If you are looking for the saturation point +2. To identify saturation points + 1. Extract the latency report for all the experiments. + * Run these commands from the directory containing the `blockstore.db` folder. + * It is advisable to adjust the hash in the `go run` command to the latest possible. + * ```bash + mkdir results + go run github.com/cometbft/cometbft/test/loadtime/cmd/report@3003ef7 --database-type goleveldb --data-dir ./ > results/report.txt + ``` + 2. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate. + You will need to separate data per experiment. + * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, copy its related lines to the filename that matches the number of connections, for example + ```bash - for cnum in 1 2 3 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done + for cnum in 1 2 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done ``` * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`. - * Otherwise just keep `report.txt`, and skip step 4. -4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side - * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. -5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment. - The format of the `.dat` files is amenable to loading them as matrices in Octave. - * Adapt the values of the for loop variables according to the experiments that you ran (check `report.txt`). - * Adapt `report*.txt` to the files you produced in step 3. - - ```bash - uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }')) - c=1 - rm -f *.dat - for i in 01 02 04; do - for j in 0025 0050 0100 0200; do - echo $i $j $c "${uuids[$c]}" - filename=c${i}_r${j} - grep ${uuids[$c]} raw.csv > ${filename}.csv - cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' >> ${filename}.dat - c=$(expr $c + 1) - done - done - ``` - -6. Enter Octave -7. 
@@ -99,108 +118,46 @@ The CometBFT team should improve it at every iteration to increase the amount of #### Steps 1. Unzip the blockstore into a directory -2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore - * ```bash - mkdir results - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@f1aaa436d --database-type goleveldb --data-dir ./ > results/report.txt` - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@f1aaa436d --database-type goleveldb --data-dir ./ --csv results/raw.csv` - ``` -3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate - * If you are looking for the saturation point + 2. To identify saturation points + 1. Extract the latency report for all the experiments. + * Run these commands from the directory containing the `blockstore.db` folder. + * It is advisable to adjust the hash in the `go run` command to the latest one available. + * ```bash + mkdir results + go run github.com/cometbft/cometbft/test/loadtime/cmd/report@3003ef7 --database-type goleveldb --data-dir ./ > results/report.txt + ``` + 2. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate. + You will need to separate data per experiment. + * Create files `report1.txt`, `report2.txt`, `report4.txt` and, for each experiment in file `report.txt`, copy its related lines to the filename that matches the number of connections, for example + ```bash - for cnum in 1 2 3 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done + for cnum in 1 2 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done ``` * Sort the experiments in `report1.txt` in ascending tx rate order. Likewise for `report2.txt` and `report4.txt`. - * Otherwise just keep `report.txt`, and skip step 4. -4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side - * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. -5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment. The format of the `.dat` files is amenable to loading them as matrices in Octave. - * Adapt the values of the for loop variables according to the experiments that you ran (check `report.txt`). - * Adapt `report*.txt` to the files you produced in step 3. - - ```bash - uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }')) - c=1 - rm -f *.dat - for i in 01 02 04; do - for j in 0025 0050 0100 0200; do - echo $i $j $c "${uuids[$c]}" - filename=c${i}_r${j} - grep ${uuids[$c]} raw.csv > ${filename}.csv - cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' >> ${filename}.dat - c=$(expr $c + 1) - done - done - ``` - -6. Enter Octave -7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet - - ```octave - conns = { "01"; "02"; "04" }; - rates = { "0025"; "0050"; "0100"; "0200" }; - for i = 1:length(conns) - for j = 1:length(rates) - filename = strcat("c", conns{i}, "_r", rates{j}, ".dat"); - load("-ascii", filename); - endfor - endfor - ``` - -8. Set variable release to the current release undergoing QA - - ```octave - release = "v0.34.x"; - ``` - -9. Generate a plot with all (or some) experiments, where the X axis is the experiment time, - and the y axis is the latency of transactions. - The following snippet plots all experiments. - - ```octave - legends = {}; - hold off; - for i = 1:length(conns) - for j = 1:length(rates) - data_name = strcat("c", conns{i}, "_r", rates{j}); - l = strcat("c=", conns{i}, " r=", rates{j}); - m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, "."); - hold on; - legends(1, end+1) = l; - endfor - endfor - legend(legends, "location", "northeastoutside"); - xlabel("experiment time (s)"); - ylabel("latency (s)"); - t = sprintf("200-node testnet - %s", release); - title(t); - ``` - -10. Consider adjusting the axis, in case you want to compare your results to the baseline, for instance - - ```octave - axis([0, 100, 0, 30], "tic"); - ``` - -11. Use Octave's GUI menu to save the plot (e.g. as `.png`) - -12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary. - -13. To generate a latency vs throughput plot, using the raw CSV file generated - in step 2, follow the instructions for the [`latency_throughput.py`] script. + * Otherwise just keep `report.txt`, and skip to the next step. + 3. Generate file `report_tabbed.txt` by showing the contents of `report1.txt`, `report2.txt`, `report4.txt` side by side + * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. + * Combine the column files into a single table file: + * Replace tabs by spaces in all column files. For example, + `sed -i.bak 's/\t/ /g' results/report1.txt`. + * Merge the new column files into one: + `paste results/report1.txt results/report2.txt results/report4.txt | column -s $'\t' -t > report_tabbed.txt` + +3. To generate a latency vs throughput plot, extract the data as a CSV + * ```bash + go run github.com/cometbft/cometbft/test/loadtime/cmd/report@3003ef7 --database-type goleveldb --data-dir ./ --csv results/raw.csv + ``` + * Follow the instructions for the [`latency_throughput.py`] script. This plot is useful to visualize the saturation point. - -[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting - -14. Alternatively, follow the instructions for the [`latency_plotter.py`] script. - This script generates a series of plots per experiment and configuration that my + * Alternatively, follow the instructions for the [`latency_plotter.py`] script. + This script generates a series of plots per experiment and configuration that may help with visualizing Latency vs Throughput variation.
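Before reaching for the plotting scripts, a quick numeric summary of `raw.csv` can confirm that the extraction worked. The following is a rough sketch, not part of the official tooling, assuming the column layout implied by the Octave steps above (experiment UUID, then a nanosecond timestamp, then a nanosecond latency):

```bash
# Mean latency, in seconds, per experiment UUID, straight from raw.csv.
# Rows whose third field is not numeric (e.g. a header line) are skipped.
awk -F, '$3 ~ /^[0-9]+$/ { sum[$1] += $3; n[$1]++ }
         END { for (id in sum) printf "%s %.3f\n", id, sum[id] / n[id] / 1e9 }' results/raw.csv
```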
-[`latency_plotter.py`]: ../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting-version-2 +[`latency_throughput.py`]: https://github.com/cometbft/cometbft/tree/v0.38.x/scripts/qa/reporting#latency-vs-throughput-plotting +[`latency_plotter.py`]: https://github.com/cometbft/cometbft/tree/v0.38.x/scripts/qa/reporting#latency-vs-throughput-plotting-version-2 #### Extracting Prometheus Metrics @@ -211,7 +168,7 @@ The CometBFT team should improve it at every iteration to increase the amount of 4. Identify the time window you want to plot in your graphs. 5. Execute the [`prometheus_plotter.py`] script for the time window. -[`prometheus_plotter.py`]: ../../scripts/qa/reporting/README.md#prometheus-metrics +[`prometheus_plotter.py`]: https://github.com/cometbft/cometbft/tree/v0.38.x/scripts/qa/reporting#prometheus-metrics ## Rotating Node Testnet @@ -229,24 +186,72 @@ This section explains how the tests were carried out for reproducibility purpose 6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. All nodes should be increasing their heights. 7. In a different shell, - * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` + * run `make runload LOAD_CONNECTIONS=X LOAD_TX_RATE=Y LOAD_TOTAL_TIME=Z` (a concrete example is given after this step list) * `X` and `Y` should reflect a load below the saturation point (see, e.g., - [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) for further info) + [this paragraph](./TMCore-QA-34.md#finding-the-saturation-point) for further info) + * `Z` (in seconds) should be big enough to keep running throughout the test, until we manually stop it in step 9. + In principle, a good value for `Z` is `7200` (2 hours). 8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. - * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length + * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for the full length of the experiment. -9. When the height of the chain reaches 3000, stop the `make rotate` script + * [This](http://<PROMETHEUS-NODE-IP>:9090/classic/graph?g0.range_input=100m&g0.expr=cometbft_consensus_height%7Bjob%3D~%22ephemeral.*%22%7D%20or%20cometbft_blocksync_latest_block_height%7Bjob%3D~%22ephemeral.*%22%7D&g0.tab=0&g1.range_input=100m&g1.expr=cometbft_mempool_size%7Bjob!~%22ephemeral.*%22%7D&g1.tab=0&g2.range_input=100m&g2.expr=cometbft_consensus_num_txs%7Bjob!~%22ephemeral.*%22%7D&g2.tab=0) + is an example Prometheus URL you can use to monitor the test case's progress. +9. When the height of the chain reaches 3000, stop the `make runload` script. 10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice) after height 3000 was reached, stop `make rotate` -11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine -12. Verify that the data was collected without errors +11. Run `make stop-network` +12. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine +13. Verify that the data was collected without errors * at least one blockstore DB for a CometBFT validator * the Prometheus database from the Prometheus node * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) -13. **Run `make terraform-destroy`** +14. **Run `make terraform-destroy`**
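As a concrete instance of step 7 (the numbers are placeholders, not recommendations; use a load you have verified to sit below the saturation point of the version under test):

```bash
# One websocket connection at 400 tx/s, kept running for the suggested 2 hours.
make runload LOAD_CONNECTIONS=1 LOAD_TX_RATE=400 LOAD_TOTAL_TIME=7200
```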
Steps 8 to 10 are highly manual at the moment and will be improved in future iterations. +### Result Extraction + +In order to obtain a latency plot, follow the instructions above for the 200 node experiment, +but the `results.txt` file contains only one experiment. + +As for Prometheus, the same method as for the 200 node experiment can be applied. + +## Vote Extensions Testnet + +### Running the test + +This section explains how the tests were carried out for reproducibility purposes. + +1. [If you haven't done it before] + Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform and `doctl`. +2. Copy file `varyVESize.toml` onto `testnet.toml` (do NOT commit this change). +3. Set variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested. +4. Follow steps 5-10 of the `README.md` to configure and start the testnet + * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests +5. Configure the load runner to produce the desired transaction load. + * set makefile variables `ROTATE_CONNECTIONS` and `ROTATE_TX_RATE` to values that will produce the desired transaction load. + * set `ROTATE_TOTAL_TIME` to 150 (seconds). + * set `ITERATIONS` to the number of iterations that each configuration should run for. +6. Execute steps 5-10 of the `README.md` file at the testnet repository. + +7. Repeat the following steps for each desired `vote_extension_size` (a driver sketch for this loop is given after this list) + 1. Update the configuration (you can skip this step if you didn't change the `vote_extension_size`) + * Update the `vote_extension_size` in the `testnet.toml` to the desired value. + * `make configgen` + * `ANSIBLE_SSH_RETRIES=10 ansible-playbook ./ansible/re-init-testapp.yaml -u root -i ./ansible/hosts --limit=validators -e "testnet_dir=testnet" -f 20` + * `make restart` + 2. Run the test + * `make runload` + This will repeat the tests `ITERATIONS` times every time it is invoked. + 3. Collect your data + * `make retrieve-data` + Gathers all relevant data from the testnet into the orchestrating machine, inside folder `experiments`. + Two subfolders are created, one blockstore DB for a CometBFT validator and one for the Prometheus DB data. + * Verify that the data was collected without errors with `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s). +8. Clean up your setup. + * `make terraform-destroy`; don't forget that you need to type **yes** for it to complete.
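The per-size loop in step 7 lends itself to a small driver script. A hypothetical sketch, assuming `vote_extension_size` is a top-level key in `testnet.toml` and using an illustrative set of sizes (the plots in this report cover 2k through 64k):

```bash
# Sweep several vote extension sizes; each iteration reconfigures the
# testnet, replays the load, and collects results under ./experiments.
for size in 2048 4096 8192 16384 32768 65536; do
  sed -i.bak "s/^vote_extension_size *=.*/vote_extension_size = $size/" testnet.toml
  make configgen
  ANSIBLE_SSH_RETRIES=10 ansible-playbook ./ansible/re-init-testapp.yaml \
    -u root -i ./ansible/hosts --limit=validators -e "testnet_dir=testnet" -f 20
  make restart
  make runload        # repeats the test ITERATIONS times
  make retrieve-data  # data lands in the experiments folder
done
```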
+ + ### Result Extraction In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but: @@ -254,4 +259,4 @@ In order to obtain a latency plot, follow the instructions above for the 200 nod * The `results.txt` file contains only one experiment * Therefore, no need for any `for` loops -As for prometheus, the same method as for the 200 node experiment can be applied. +As for Prometheus, the same method as for the 200 node experiment can be applied. diff --git a/docs/rfc/rfc-100-abci-vote-extension-propag.md b/docs/rfc/rfc-100-abci-vote-extension-propag.md index de3296e5a21..fce0fe9afc2 100644 --- a/docs/rfc/rfc-100-abci-vote-extension-propag.md +++ b/docs/rfc/rfc-100-abci-vote-extension-propag.md @@ -737,6 +737,6 @@ required to make progress will always be held somewhere in the network. [abci-0-17-0]: https://github.com/cometbft/cometbft/blob/v0.34.x/spec/abci/README.md [abci-1-0]: https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/README.md -[abci-2-0]: https://github.com/cometbft/cometbft/blob/main/spec/abci/README.md -[light-client-spec]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/README.md +[abci-2-0]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/README.md +[light-client-spec]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/README.md [toggle-vote-extensions]: https://github.com/tendermint/tendermint/issues/8453 diff --git a/docs/tools/README.md b/docs/tools/README.md index de29e17f122..88e19b76711 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -2,7 +2,7 @@ order: 1 parent: title: Tools - order: 6 + order: 5 --- # Overview diff --git a/docs/tools/debugging.md b/docs/tools/debugging.md index 69a73fa9503..4f7af58b1f4 100644 --- a/docs/tools/debugging.md +++ b/docs/tools/debugging.md @@ -102,4 +102,4 @@ The list of available RPC endpoints can be found by making a request to the RPC For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to `http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints. -Additional information on the CometBFT RPC endpoints can be found in the [rpc documentation](https://docs.cometbft.com/master/rpc). +Additional information on the CometBFT RPC endpoints can be found in the [rpc documentation](https://docs.cometbft.com/v0.38/rpc). diff --git a/evidence/doc.go b/evidence/doc.go index 49c41ca2de3..1e4909ff019 100644 --- a/evidence/doc.go +++ b/evidence/doc.go @@ -1,7 +1,7 @@ /* Package evidence handles all evidence storage and gossiping from detection to block proposal. For the different types of evidence refer to the `evidence.go` file in the types package -or https://github.com/cometbft/cometbft/blob/main/spec/consensus/light-client/accountability.md. +or https://github.com/cometbft/cometbft/blob/v0.38.x/spec/consensus/light-client/accountability.md. # Gossiping diff --git a/evidence/mocks/block_store.go b/evidence/mocks/block_store.go index 566fdcec8c4..45be790b5bf 100644 --- a/evidence/mocks/block_store.go +++ b/evidence/mocks/block_store.go @@ -16,6 +16,10 @@ type BlockStore struct { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -46,6 +54,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) @@ -58,13 +70,12 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } -type mockConstructorTestingTNewBlockStore interface { +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value. +func NewBlockStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { +}) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/evidence/pool.go b/evidence/pool.go index e36b66db38e..b502341a861 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -132,11 +132,11 @@ func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // AddEvidence checks the evidence is valid and adds it to the pool. func (evpool *Pool) AddEvidence(ev types.Evidence) error { - evpool.logger.Debug("Attempting to add evidence", "ev", ev) + evpool.logger.Info("Attempting to add evidence", "ev", ev) // We have already verified this piece of evidence - no need to do it again if evpool.isPending(ev) { - evpool.logger.Debug("Evidence already pending, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) return nil } @@ -144,7 +144,7 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { if evpool.isCommitted(ev) { // this can happen if the peer that sent us the evidence is behind so we shouldn't // punish the peer. - evpool.logger.Debug("Evidence was already committed, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence was already committed, ignoring this one", "ev", ev) return nil } @@ -513,13 +513,13 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { // check if we already have this evidence if evpool.isPending(dve) { - evpool.logger.Debug("evidence already pending; ignoring", "evidence", dve) + evpool.logger.Info("evidence already pending; ignoring", "evidence", dve) continue } // check that the evidence is not already committed on chain if evpool.isCommitted(dve) { - evpool.logger.Debug("evidence already committed; ignoring", "evidence", dve) + evpool.logger.Info("evidence already committed; ignoring", "evidence", dve) continue } diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 815e3666134..2b8a8e886ad 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -416,8 +416,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo block := state.MakeBlock(i, test.MakeNTxs(i, 1), lastCommit.ToCommit(), nil, state.Validators.Proposer.Address) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = cmtversion.Consensus{Block: version.BlockProtocol, App: 1} - const parts = 1 - partSet, err := block.MakePartSet(parts) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) if err != nil { return nil, err } diff --git a/evidence/verify.go b/evidence/verify.go index 3ccdd0f4292..cd09285221a 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -95,7 +95,6 @@ func (evpool *Pool) verify(evidence types.Evidence) error { default: return fmt.Errorf("unrecognized evidence type: %T", evidence) } - } // VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. 
This involves @@ -104,16 +103,25 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // the conflicting header's commit // - 2/3+ of the conflicting validator set correctly signed the conflicting block // - the node's trusted header at the same height as the conflicting header has a different hash +// - all signatures must be checked as this will be used as evidence // // CONTRACT: must run ValidateBasic() on the evidence before verifying // // must check that the evidence has not expired (i.e. is outside the maximum age threshold) -func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, - commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { +func VerifyLightClientAttack( + e *types.LightClientAttackEvidence, + commonHeader, trustedHeader *types.SignedHeader, + commonVals *types.ValidatorSet, + now time.Time, //nolint:revive + trustPeriod time.Duration, //nolint:revive +) error { + // TODO: Should the current time and trust period be used in this method? + // If not, why were the parameters present? + + // In the case of lunatic attack there will be a different commonHeader height. Therefore the node performs a single // verification jump between the common header and the conflicting one if commonHeader.Height != e.ConflictingBlock.Height { - err := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) + err := commonVals.VerifyCommitLightTrustingAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) if err != nil { return fmt.Errorf("skipping verification of conflicting block failed: %w", err) } @@ -125,7 +133,7 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t } // Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header - if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, + if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLightAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, e.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil { return fmt.Errorf("invalid commit from conflicting block: %w", err) } diff --git a/go.mod b/go.mod index da6f0c7de42..4fbd8803e31 100644 --- a/go.mod +++ b/go.mod @@ -1,296 +1,151 @@ module github.com/cometbft/cometbft -go 1.20 +go 1.23.1 + +toolchain go1.23.2 require ( - github.com/BurntSushi/toml v1.2.1 - github.com/adlio/schema v1.3.3 + github.com/BurntSushi/toml v1.4.0 + github.com/adlio/schema v1.3.6 github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/fortytw2/leaktest v1.3.0 - github.com/go-kit/kit v0.12.0 + github.com/go-kit/kit v0.13.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/golang/protobuf v1.5.3 - github.com/golangci/golangci-lint v1.52.0 + github.com/golang/protobuf v1.5.4 github.com/google/orderedcode v0.0.1 - github.com/gorilla/websocket v1.5.0 + github.com/gorilla/websocket v1.5.3 github.com/informalsystems/tm-load-test v1.3.0 - github.com/lib/pq v1.10.7 - github.com/libp2p/go-buffer-pool v0.1.0 - github.com/minio/highwayhash v1.0.2 + github.com/lib/pq v1.10.9 + github.com/minio/highwayhash v1.0.3 github.com/ory/dockertest v3.3.5+incompatible github.com/pkg/errors v0.9.1 - github.com/pointlander/peg v1.0.1 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.42.0
+ github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.59.1 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/rs/cors v1.8.3 - github.com/sasha-s/go-deadlock v0.3.1 + github.com/rs/cors v1.11.1 + github.com/sasha-s/go-deadlock v0.3.5 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.6.1 - github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.2 - golang.org/x/crypto v0.7.0 - golang.org/x/net v0.8.0 - google.golang.org/grpc v1.54.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.9.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 + google.golang.org/grpc v1.67.0 ) -require ( - github.com/bufbuild/buf v1.15.1 - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 -) +require github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 require ( - github.com/Masterminds/semver/v3 v3.2.0 - github.com/btcsuite/btcd/btcec/v2 v2.3.2 - github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/cometbft/cometbft-db v0.7.0 - github.com/cosmos/gogoproto v1.4.6 - github.com/go-git/go-git/v5 v5.6.1 + github.com/Masterminds/semver/v3 v3.3.0 + github.com/btcsuite/btcd/btcec/v2 v2.3.4 + github.com/btcsuite/btcd/btcutil v1.1.6 + github.com/cometbft/cometbft-db v1.0.1 + github.com/cosmos/gogoproto v1.7.0 + github.com/go-git/go-git/v5 v5.12.0 github.com/gofrs/uuid v4.4.0+incompatible - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.6.0 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae - github.com/vektra/mockery/v2 v2.23.1 - golang.org/x/sync v0.1.0 - gonum.org/v1/gonum v0.12.0 - google.golang.org/protobuf v1.30.0 + golang.org/x/sync v0.8.0 + gonum.org/v1/gonum v0.15.1 + google.golang.org/protobuf v1.34.2 ) require ( - 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect - 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/Abirdcfly/dupword v0.0.11 // indirect - github.com/Antonboom/errname v0.1.9 // indirect - github.com/Antonboom/nilnil v0.1.3 // indirect + dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect - github.com/Masterminds/semver v1.5.0 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/DataDog/zstd v1.4.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/OpenPeeDeeP/depguard v1.1.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect - github.com/acomagu/bufpipe v1.0.4 // indirect - github.com/alexkohler/prealloc v1.0.0 // indirect - github.com/alingse/asasalint v0.0.11 // indirect - github.com/ashanbrown/forbidigo v1.5.1 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.0 // indirect - github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v3 v3.4.0 // indirect - github.com/breml/bidichk v0.2.4 // indirect - github.com/breml/errchkjson v0.3.1 // indirect - github.com/bufbuild/connect-go v1.5.2 // indirect - github.com/bufbuild/protocompile v0.5.1 // indirect - 
github.com/butuzov/ireturn v0.1.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/charithe/durationcheck v0.0.10 // indirect - github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect - github.com/chigopher/pathlib v0.12.0 // indirect - github.com/cloudflare/circl v1.3.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/containerd/continuity v0.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.10.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/denis-tingaikin/go-header v0.4.3 // indirect - github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dgraph-io/badger/v4 v4.3.0 // indirect + github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91 // indirect github.com/docker/cli v23.0.1+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v23.0.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/esimonov/ifshort v1.0.4 // indirect - github.com/ettle/strcase v0.1.1 // indirect - github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/fatih/structtag v1.2.0 // indirect - github.com/felixge/fgprof v0.9.3 // indirect - github.com/firefart/nonamedreturns v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/go-chi/chi/v5 v5.0.8 // indirect - github.com/go-critic/go-critic v0.7.0 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.4.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-toolsmith/astcast v1.1.0 // indirect - github.com/go-toolsmith/astcopy v1.1.0 // indirect - github.com/go-toolsmith/astequal v1.1.0 // indirect - github.com/go-toolsmith/astfmt v1.1.0 // indirect - github.com/go-toolsmith/astp v1.1.0 // indirect - github.com/go-toolsmith/strparse v1.1.0 // indirect - github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect - 
github.com/gofrs/uuid/v5 v5.0.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect - github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect - github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect - github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.4.0 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect - github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/go-containerregistry v0.13.0 // indirect - github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 // indirect - github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect - github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect - github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/iancoleman/strcase v0.2.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 // indirect - github.com/jgautheron/goconst v1.5.1 // indirect - github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jinzhu/copier v0.3.5 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/julz/importas v0.1.0 // indirect - github.com/junk1tm/musttag v0.5.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kisielk/errcheck v1.6.3 // indirect - github.com/kisielk/gotool v1.0.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.4 // indirect - github.com/klauspost/compress v1.16.0 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect - github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.6 // indirect - github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/ldez/gomoddirectives v0.2.3 // indirect - github.com/ldez/tagliatelle v0.4.0 // indirect - github.com/leonklingele/grouper v1.1.1 // indirect - github.com/lufeee/execinquery v1.2.1 // indirect + github.com/klauspost/compress v1.17.9 // 
indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/linxGnu/grocksdb v1.9.3 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/maratori/testableexamples v1.0.0 // indirect - github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.3.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect - github.com/moricho/tparallel v0.3.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.9.5 // indirect - github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.9.0 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc2 // indirect - github.com/opencontainers/runc v1.1.3 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/opencontainers/runc v1.1.12 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/profile v1.7.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect - github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect - github.com/polyfloyd/go-errorlint v1.4.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/quasilyte/go-ruleguard v0.3.19 // indirect - github.com/quasilyte/gogrep v0.5.0 // indirect - github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect - github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/rs/zerolog v1.29.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.3.0 // indirect - github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/satori/go.uuid v1.2.0 // indirect - github.com/securego/gosec/v2 v2.15.0 // indirect - github.com/sergi/go-diff v1.2.0 // indirect - github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect - 
github.com/sirupsen/logrus v1.9.0 // indirect - github.com/sivchari/containedctx v1.0.2 // indirect - github.com/sivchari/nosnakecase v1.7.0 // indirect - github.com/sivchari/tenv v1.7.1 // indirect - github.com/skeema/knownhosts v1.1.0 // indirect - github.com/sonatard/noctx v0.0.2 // indirect - github.com/sourcegraph/go-diff v0.7.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect - github.com/tetafro/godot v1.4.11 // indirect - github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.0.3 // indirect - github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.6 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/yagipy/maintidx v1.0.0 // indirect - github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.3 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/sdk v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect + go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools v2.2.0+incompatible // indirect - honnef.co/go/tools v0.4.3 // indirect - mvdan.cc/gofumpt v0.4.0 // indirect - mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect - mvdan.cc/lint 
v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect +) + +retract ( + // a regression was introduced + v0.38.4 + // a breaking change was introduced + v0.38.3 + // superseded by v0.38.3 because of ASA-2024-001 + [v0.38.0, v0.38.2] ) diff --git a/go.sum b/go.sum index 4e0c853dbf3..ec360e1f3bc 100644 --- a/go.sum +++ b/go.sum @@ -1,131 +1,53 @@ -4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= -4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= -4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= -4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= -github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= -github.com/Antonboom/errname v0.1.9 h1:BZDX4r3l4TBZxZ2o2LNrlGxSHran4d1u4veZdoORTT4= -github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58= -github.com/Antonboom/nilnil v0.1.3 h1:6RTbx3d2mcEu3Zwq9TowQpQMVpP75zugwOtqY1RTtcE= -github.com/Antonboom/nilnil v0.1.3/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.0 
h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= -github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= -github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= -github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= +github.com/adlio/schema v1.3.6/go.mod h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= -github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.5.1 h1:WXhzLjOlnuDYPYQo/eFlcFMi8X/kLfvWLYu6CSoebis= -github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= -github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= -github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= -github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= -github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= -github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= -github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= -github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= -github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.6 
h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= @@ -135,229 +57,124 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.15.1 h1:v7sK2uMEsGX4Z2hvu+xiMheH3C3AKBGfxPBgdUZYDQ8= -github.com/bufbuild/buf v1.15.1/go.mod h1:TQeGKam1QMfHy/xsSnnMpxN3JK5HBb6aNvZj4m52gkE= -github.com/bufbuild/connect-go v1.5.2 h1:G4EZd5gF1U1ZhhbVJXplbuUnfKpBZ5j5izqIwu2g2W8= -github.com/bufbuild/connect-go v1.5.2/go.mod h1:GmMJYR6orFqD0Y6ZgX8pwQ8j9baizDrIQMm1/a6LnHk= -github.com/bufbuild/protocompile v0.5.1 h1:mixz5lJX4Hiz4FpqFREJHIXLfaLBntfaJv1h+/jS+Qg= -github.com/bufbuild/protocompile v0.5.1/go.mod h1:G5iLmavmF4NsYtpZFvE3B/zFch2GIY8+wjsYLR/lc40= -github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= -github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= -github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= 
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chigopher/pathlib v0.12.0 h1:1GM7fN/IwXXmOHbd1jkMqHD2wUhYqUvafgxTwmLT/q8= -github.com/chigopher/pathlib v0.12.0/go.mod h1:EJ5UtJ/sK8Nt6q3VWN+EwZLZ3g0afJiG8NegYiQQ/gQ= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/circl v1.3.1 h1:4OVCZRL62ijwEwxnF6I7hLwxvIYi3VaZt8TflkqtrtA= -github.com/cloudflare/circl v1.3.1/go.mod h1:+CauBF6R70Jqcyl8N2hC8pAXYbWkGIezuSbuGLtRhnw= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= -github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod 
h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/cometbft/cometbft-db v1.0.1 h1:SylKuLseMLQKw3+i8y8KozZyJcQSL98qEe2CGMCGTYE= +github.com/cometbft/cometbft-db v1.0.1/go.mod h1:EBrFs1GDRiTqrWXYi4v90Awf/gcdD5ExzdPbg4X8+mk= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosmos/gogoproto v1.4.6 h1:Ee7z15dWJaGlgM2rWrK8N2IX7PQcuccu8oG68jp5RL4= -github.com/cosmos/gogoproto v1.4.6/go.mod h1:VS/ASYmPgv6zkPKLjR9EB91lwbLHOzaGCirmKKhncfI= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= +github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0= -github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= -github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= +github.com/dgraph-io/badger/v4 v4.3.0 h1:lcsCE1/1qrRhqP+zYx6xDZb8n7U+QlwNicpc676Ub40= +github.com/dgraph-io/badger/v4 v4.3.0/go.mod h1:Sc0T595g8zqAQRDf44n+z3wG4BOqLwceaFntt8KPxUM= +github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91 h1:Pux6+xANi0I7RRo5E1gflI4EZ2yx3BGZ75JkAIvGEOA= +github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91/go.mod h1:swkazRqnUf1N62d0Nutz7KIj2UKqsm/H8tD0nBJAXqM= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units 
v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= -github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= -github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= -github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= -github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= -github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= -github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= -github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-critic/go-critic v0.7.0 h1:tqbKzB8pqi0NsRZ+1pyU4aweAF7A7QN0Pi4Q02+rYnQ= -github.com/go-critic/go-critic v0.7.0/go.mod h1:moYzd7GdVXE2C2hYTwd7h0CPcqlUeclsyBRwMa38v64= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= -github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ= -github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= -github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= -github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 
h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= -github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= -github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= -github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= -github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= -github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= -github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= -github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= -github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= -github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= -github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= -github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= -github.com/go-toolsmith/typep v1.1.0 
h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= -github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= -github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -366,958 +183,333 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= -github.com/golangci/golangci-lint v1.52.0 h1:T7w3tuF1goz64qGV+ML4MgysSl/yUfA3UZJK92oE48A= -github.com/golangci/golangci-lint v1.52.0/go.mod h1:wlTh+d/oVlgZC2yCe6nlxrxNAnuhEQC0Zdygoh72Uak= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.4.0 
h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0= -github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= -github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= 
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U= -github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= -github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= -github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= 
-github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/informalsystems/tm-load-test v1.3.0 h1:FGjKy7vBw6mXNakt+wmNWKggQZRsKkEYpaFk/zR64VA= github.com/informalsystems/tm-load-test v1.3.0/go.mod h1:OQ5AQ9TbT5hKWBNIwsMjn6Bf4O0U4b1kRc+0qZlQJKw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 h1:2uT3aivO7NVpUPGcQX7RbHijHMyWix/yCnIrCWc+5co= -github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= -github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= -github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= -github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/junk1tm/musttag v0.5.0 h1:bV1DTdi38Hi4pG4OVWa7Kap0hi0o7EczuK6wQt9zPOM= -github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= -github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= -github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= -github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= -github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= -github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= -github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= -github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.4.0 h1:sylp7d9kh6AdXN2DpVGHBRb5guTVAgOxqNGhbqc4b1c= -github.com/ldez/tagliatelle v0.4.0/go.mod h1:mNtTfrHy2haaBAw+VT7IBV6VXBThS7TCreYWbBcJ87I= -github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= -github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= -github.com/lib/pq v1.10.7 
h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= -github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/linxGnu/grocksdb v1.9.3 h1:s1cbPcOd0cU2SKXRG1nEqCOWYAELQjdqg3RVI2MH9ik= +github.com/linxGnu/grocksdb v1.9.3/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= -github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= -github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= -github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= -github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= -github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= -github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= 
-github.com/mgechev/revive v1.3.1 h1:OlQkcH40IB2cGuprTPcjB0iIUddgVZgGmDX3IAMR8D4= -github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= +github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moricho/tparallel v0.3.0 h1:8dDx3S3e+jA+xiQXC7O3dvfRTe/J+FYlTDDW01Y7z/Q= -github.com/moricho/tparallel v0.3.0/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= -github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.9.5 h1:TzssWan6orBiLYVqewCG8faud9qlFntJE30ACpzmGME= -github.com/nishanths/exhaustive v0.9.5/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA= -github.com/nishanths/predeclared 
v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= -github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.9.0 h1:Sm0zX5QfjJzkeCjEp+t6d3Ha0jwvoDjleP9XCsrEzOA= -github.com/nunnatsa/ginkgolinter v0.9.0/go.mod h1:FHaMLURXP7qImeH6bvxWJUpyH+2tuqe5j4rW1gxJRmI= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I= github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.8.0 h1:pAM+oBNPrpXRs+E/8spkeGx9QgekbRVyr74EUvRVOUI= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/otiai10/copy 
v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= -github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 h1:hUmXhbljNFtrH5hzV9kiRoddZ5nfPTq3K0Sb2hYYiqE= -github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3/go.mod h1:q5NXNGzqj5uPnVuhGkZfmgHqNUhf15VLi6L9kW0VEc0= -github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 h1:RHHRCZeaNyBXdYPMjZNH8/XHDBH38TZzw8izrW7dmBE= -github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4/go.mod h1:RdR1j20Aj5pB6+fw6Y9Ur7lMHpegTEjY1vc19hEZL40= -github.com/pointlander/peg 
v1.0.1 h1:mgA/GQE8TeS9MdkU6Xn6iEzBmQUQCNuWD7rHCK6Mjs0= -github.com/pointlander/peg v1.0.1/go.mod h1:5hsGDQR2oZI4QoWz0/Kdg3VSVEC31iJw/b7WjqCBGRI= -github.com/polyfloyd/go-errorlint v1.4.0 h1:b+sQ5HibPIAjEZwtuwU8Wz/u0dMZ7YL+bk+9yWyHVJk= -github.com/polyfloyd/go-errorlint v1.4.0/go.mod h1:qJCkPeBn+0EXkdKTrUCcuFStM2xrDKfxI3MGLXPexUs= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc= -github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw= -github.com/quasilyte/gogrep 
v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= -github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= -github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= -github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= -github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= -github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= -github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= -github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= -github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= -github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod 
h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= -github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= -github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw= -github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= -github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= -github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= -github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= -github.com/sivchari/tenv 
v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= -github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= -github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= -github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= -github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= -github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= -github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.4.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= -github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/t-yuki/gocover-cobertura 
v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= -github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e h1:MV6KaVu/hzByHP0UvJ4HcMGE/8a6A4Rggc/0wx2AvJo= -github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= -github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= -github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= -github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= -github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= -github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= -github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= -github.com/vektra/mockery/v2 v2.23.1 h1:N59FENM2d/gWE6Ns5JPuf9a7jqQWeheGefZqvuvb1dM= -github.com/vektra/mockery/v2 v2.23.1/go.mod h1:Zh3Kv1ckKs6FokhlVLcCu6UTyzfS3M8mpROz1lBNp+w= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= -github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= -github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= -github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= -gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo= +go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU= -golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= 
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= -gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1326,22 +518,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1352,35 +535,13 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= -honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= -mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= -mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/inspect/inspect.go b/inspect/inspect.go index b09faa23eed..ad87551b900 100644 --- a/inspect/inspect.go +++ b/inspect/inspect.go @@ -21,9 +21,7 @@ import ( "golang.org/x/sync/errgroup" ) -var ( - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -) +var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) // Inspector manages an RPC service that exports methods to debug a failed node. // After a node shuts down due to a consensus failure, it will no longer start @@ -50,7 +48,13 @@ type Inspector struct { // The caller is responsible for starting and stopping the Inspector service. // //nolint:lll -func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector { +func New( + cfg *config.RPCConfig, + bs state.BlockStore, + ss state.Store, + txidx txindex.TxIndexer, + blkidx indexer.BlockIndexer, +) *Inspector { routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger) eb := types.NewEventBus() eb.SetLogger(logger.With("module", "events")) @@ -82,9 +86,8 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - lg := logger.With("module", "inspect") ss := state.NewStore(sDB, state.StoreOptions{}) - return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil + return New(cfg.RPC, bs, ss, txidx, blkidx), nil } // Run starts the Inspector servers and blocks until the servers shut down. 
The passed diff --git a/inspect/inspect_test.go b/inspect/inspect_test.go index 69aa9f7a082..38245ac0c62 100644 --- a/inspect/inspect_test.go +++ b/inspect/inspect_test.go @@ -14,7 +14,6 @@ import ( "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/inspect" "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" httpclient "github.com/cometbft/cometbft/rpc/client/http" indexermocks "github.com/cometbft/cometbft/state/indexer/mocks" @@ -35,7 +34,6 @@ func TestInspectConstructor(t *testing.T) { require.NoError(t, err) require.NotNil(t, d) }) - } func TestInspectRun(t *testing.T) { @@ -55,7 +53,6 @@ func TestInspectRun(t *testing.T) { cancel() stoppedWG.Wait() }) - } func TestBlock(t *testing.T) { @@ -77,8 +74,7 @@ func TestBlock(t *testing.T) { blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -130,8 +126,7 @@ func TestTxSearch(t *testing.T) { Return([]*abcitypes.TxResult{testTxResult}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -150,7 +145,7 @@ func TestTxSearch(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") require.NoError(t, err) - var page = 1 + page := 1 resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) @@ -163,6 +158,7 @@ func TestTxSearch(t *testing.T) { stateStoreMock.AssertExpectations(t) blockStoreMock.AssertExpectations(t) } + func TestTx(t *testing.T) { testHash := []byte("test") testTx := []byte("tx") @@ -178,8 +174,7 @@ func TestTx(t *testing.T) { }, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -209,6 +204,7 @@ func TestTx(t *testing.T) { stateStoreMock.AssertExpectations(t) blockStoreMock.AssertExpectations(t) } + func TestConsensusParams(t *testing.T) { testHeight := int64(1) testMaxGas := int64(55) @@ -226,8 +222,7 @@ func TestConsensusParams(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -277,8 +272,7 @@ func TestBlockResults(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, 
txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -325,8 +319,7 @@ func TestCommit(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -379,8 +372,7 @@ func TestBlockByHash(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -432,8 +424,7 @@ func TestBlockchain(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -485,8 +476,7 @@ func TestValidators(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -546,8 +536,7 @@ func TestBlockSearch(t *testing.T) { mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). 
Return([]int64{testHeight}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} diff --git a/internal/indexer/indexer_utils.go b/internal/indexer/indexer_utils.go new file mode 100644 index 00000000000..c6caaee6f85 --- /dev/null +++ b/internal/indexer/indexer_utils.go @@ -0,0 +1,119 @@ +package indexer + +import ( + "fmt" + "math/big" + + "github.com/cometbft/cometbft/state/indexer" +) + +// If the actual event value is a float, we get the condition and parse it as a float +// to compare against. +func compareFloat(op1 *big.Float, op2 interface{}) (int, bool, error) { + switch opVal := op2.(type) { + case *big.Int: + vF := new(big.Float) + vF.SetInt(opVal) + cmp := op1.Cmp(vF) + return cmp, false, nil + + case *big.Float: + return op1.Cmp(opVal), true, nil + default: + return -1, false, fmt.Errorf("unable to parse arguments, bad type: %T", op2) + } +} + +// If the event value we compare against the condition (op2) is an integer, +// we convert the int to a float with a precision equal to the number of bits +// needed to represent the integer. This avoids rounding issues with floats, +// where 100 would equal 100.2 because 100.2 is rounded to 100, while 100.7 +// would be rounded to 101. +func compareInt(op1 *big.Int, op2 interface{}) (int, bool, error) { + switch opVal := op2.(type) { + case *big.Int: + return op1.Cmp(opVal), false, nil + case *big.Float: + vF := new(big.Float) + vF.SetInt(op1) + return vF.Cmp(opVal), true, nil + default: + return -1, false, fmt.Errorf("unable to parse arguments, unexpected type: %T", op2) + } +} + +func CheckBounds(ranges indexer.QueryRange, v interface{}) (bool, error) { + // These functions fetch the lower and upper bounds of the query. + // It is expected that for x > 5, the value of lowerBound is 6. + // This is achieved by adding one to the actual lower bound. + // For a query of x < 5, the value of upperBound is 4. + // This is achieved by subtracting one from the actual upper bound. + + // For integers this behavior will work. However, for floats, we cannot simply add/sub 1. + // Query: x < 5.5; x = 5 should match the query. If we subtracted one as for integers, + // the upperBound would be 4.5 and x would not match. Thus we do not subtract anything for + // floating point bounds. + + // We could rewrite these functions to not add/sub 1, but the function also handles time arguments. + // To be sure we are not breaking existing queries that compare time, and as we are planning to replace + // the indexer in the future, we adapt the code here to handle floats as a special case. + lowerBound := ranges.LowerBoundValue() + upperBound := ranges.UpperBoundValue() + + // *Explanation for the isFloat condition below.* + // In LowerBoundValue(), for floating points, we cannot simply add 1 due to the reasons explained + // in the comment at the beginning. The same is true for subtracting one for UpperBoundValue(). + // That means that for integers, if the condition is >=, cmp will be either 0 or 1 + // (cmp == -1 should always be false). + // But if the lowerBound is a float, we have not subtracted one, so returning a 0 + // is correct only if ranges.IncludeLowerBound is true.
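As a minimal, standalone sketch of the int-vs-float comparison rule described in the comment above (all names in this snippet are illustrative only and are not part of the change; the comment's own inline examples continue just below):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        v := big.NewInt(100)         // integer event value
        bound := big.NewFloat(100.2) // float query bound

        // Lossless direction (what the code above does): lift the int into a
        // big.Float, which represents any integer exactly, then compare.
        vF := new(big.Float).SetInt(v)
        fmt.Println(vF.Cmp(bound)) // -1: 100 < 100.2, as expected

        // Lossy direction (the bug being avoided): rounding the float bound
        // to an int truncates 100.2 to 100, and the values compare equal.
        i, _ := bound.Int(nil)
        fmt.Println(v.Cmp(i)) // 0: 100 spuriously "equals" 100.2
    }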
+ // example int: x < 100; upperBound = 99; if x.Cmp(99) == 0 the condition holds + // example float: x < 100.0; upperBound = 100.0; if x.Cmp(100) == 0 then returning x + // would be wrong. + switch vVal := v.(type) { + case *big.Int: + if lowerBound != nil { + cmp, isFloat, err := compareInt(vVal, lowerBound) + if err != nil { + return false, err + } + if cmp == -1 || (isFloat && cmp == 0 && !ranges.IncludeLowerBound) { + return false, err + } + } + if upperBound != nil { + cmp, isFloat, err := compareInt(vVal, upperBound) + if err != nil { + return false, err + } + if cmp == 1 || (isFloat && cmp == 0 && !ranges.IncludeUpperBound) { + return false, err + } + } + + case *big.Float: + if lowerBound != nil { + cmp, isFloat, err := compareFloat(vVal, lowerBound) + if err != nil { + return false, err + } + if cmp == -1 || (cmp == 0 && isFloat && !ranges.IncludeLowerBound) { + return false, err + } + } + if upperBound != nil { + cmp, isFloat, err := compareFloat(vVal, upperBound) + if err != nil { + return false, err + } + if cmp == 1 || (cmp == 0 && isFloat && !ranges.IncludeUpperBound) { + return false, err + } + } + + default: + return false, fmt.Errorf("invalid argument type in query: %T", v) + } + return true, nil +} diff --git a/internal/test/validator.go b/internal/test/validator.go index 73733a018a9..ddc471ee8e6 100644 --- a/internal/test/validator.go +++ b/internal/test/validator.go @@ -10,7 +10,7 @@ import ( "github.com/cometbft/cometbft/types" ) -func Validator(ctx context.Context, votingPower int64) (*types.Validator, types.PrivValidator, error) { +func Validator(_ context.Context, votingPower int64) (*types.Validator, types.PrivValidator, error) { privVal := types.NewMockPV() pubKey, err := privVal.GetPubKey() if err != nil { diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index f6be50332de..92386e50023 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -12,12 +12,14 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" ) -const Version = "0.0.1" -const readBufferSize = 1024 // 1KB at a time +const ( + Version = "0.0.1" + readBufferSize = 1024 // 1KB at a time +) // Parse command-line options func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { - var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + flagSet := flag.NewFlagSet(os.Args[0], flag.ExitOnError) var chopSizeStr, limitSizeStr string flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") @@ -78,10 +80,9 @@ func main() { } if err == io.EOF { os.Exit(0) - } else { - fmt.Println("logjack errored") - os.Exit(1) } + fmt.Println("logjack errored") + os.Exit(1) } _, err = group.Write(buf[:n]) if err != nil { diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index 358e37be8df..f9744f9c7b4 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -3,6 +3,7 @@ package bits import ( "encoding/binary" "fmt" + "math/bits" "regexp" "strings" "sync" @@ -31,7 +32,27 @@ func NewBitArray(bits int) *BitArray { } } -// Size returns the number of bits in the bitarray +// NewBitArrayFromFn returns a new bit array. +// It returns nil if the number of bits is zero or negative. +// It initializes the `i`th bit to the value of `fn(i)`.
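+// For example (an illustrative call, not part of the original change): +// NewBitArrayFromFn(4, func(i int) bool { return i%2 == 0 }) yields the +// 4-bit array `x_x_`, with bits 0 and 2 set.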
+func NewBitArrayFromFn(bits int, fn func(int) bool) *BitArray { + if bits <= 0 { + return nil + } + bA := &BitArray{ + Bits: bits, + Elems: make([]uint64, (bits+63)/64), + } + for i := 0; i < bits; i++ { + v := fn(i) + if v { + bA.Elems[i/64] |= (uint64(1) << uint(i%64)) + } + } + return bA +} + +// Size returns the number of bits in the bitarray. func (bA *BitArray) Size() int { if bA == nil { return 0 @@ -247,44 +268,69 @@ func (bA *BitArray) PickRandom() (int, bool) { } bA.mtx.Lock() - trueIndices := bA.getTrueIndices() + numTrueIndices := bA.getNumTrueIndices() + if numTrueIndices == 0 { // no bits set to true + bA.mtx.Unlock() + return 0, false + } + index := bA.getNthTrueIndex(cmtrand.Intn(numTrueIndices)) bA.mtx.Unlock() - - if len(trueIndices) == 0 { // no bits set to true + if index == -1 { return 0, false } - - return trueIndices[cmtrand.Intn(len(trueIndices))], true + return index, true } -func (bA *BitArray) getTrueIndices() []int { - trueIndices := make([]int, 0, bA.Bits) - curBit := 0 +func (bA *BitArray) getNumTrueIndices() int { + count := 0 numElems := len(bA.Elems) - // set all true indices + // handle all elements except the last one for i := 0; i < numElems-1; i++ { - elem := bA.Elems[i] - if elem == 0 { - curBit += 64 - continue - } - for j := 0; j < 64; j++ { - if (elem & (uint64(1) << uint64(j))) > 0 { - trueIndices = append(trueIndices, curBit) - } - curBit++ - } + count += bits.OnesCount64(bA.Elems[i]) } // handle last element - lastElem := bA.Elems[numElems-1] - numFinalBits := bA.Bits - curBit + numFinalBits := bA.Bits - (numElems-1)*64 for i := 0; i < numFinalBits; i++ { - if (lastElem & (uint64(1) << uint64(i))) > 0 { - trueIndices = append(trueIndices, curBit) + if (bA.Elems[numElems-1] & (uint64(1) << uint64(i))) > 0 { + count++ + } + } + return count +} + +// getNthTrueIndex returns the index of the nth true bit in the bit array. +// n is 0 indexed. (e.g. for bitarray x__x, getNthTrueIndex(0) returns 0). +// If there is no such value, it returns -1. +func (bA *BitArray) getNthTrueIndex(n int) int { + numElems := len(bA.Elems) + count := 0 + + // Iterate over each element + for i := 0; i < numElems; i++ { + // Count set bits in the current element + setBits := bits.OnesCount64(bA.Elems[i]) + + // If the count of set bits in this element plus the count so far + // is greater than or equal to n, then the nth bit must be in this element + if count+setBits >= n { + // Find the index of the nth set bit within this element + for j := 0; j < 64; j++ { + if bA.Elems[i]&(1<<uint(j)) != 0 { + if count == n { + return i*64 + j + } + count++ + } + } + } else { + count += setBits + } + } + + // No nth true bit exists + return -1 +} @@ -409,6 +455,13 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { // Construct new BitArray and copy over.
numBits := len(bits) bA2 := NewBitArray(numBits) + if bA2 == nil { + // Treat it as if we encountered the case: b == "null" + bA.Bits = 0 + bA.Elems = nil + return nil + } + for i := 0; i < numBits; i++ { if bits[i] == 'x' { bA2.SetIndex(i, true) diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 4694da9a919..0f7351f346e 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -12,25 +12,24 @@ import ( cmtrand "github.com/cometbft/cometbft/libs/rand" ) -func randBitArray(bits int) (*BitArray, []byte) { +var ( + empty16Bits = "________________" + empty64Bits = empty16Bits + empty16Bits + empty16Bits + empty16Bits + full16bits = "xxxxxxxxxxxxxxxx" + full64bits = full16bits + full16bits + full16bits + full16bits +) + +func randBitArray(bits int) *BitArray { src := cmtrand.Bytes((bits + 7) / 8) - bA := NewBitArray(bits) - for i := 0; i < len(src); i++ { - for j := 0; j < 8; j++ { - if i*8+j >= bits { - return bA, src - } - setBit := src[i]&(1<<uint(j)) > 0 - bA.SetIndex(i*8+j, setBit) - } + srcIndexToBit := func(i int) bool { + return src[i/8]&(1<<uint(i%8)) > 0 + } - return bA, src + return NewBitArrayFromFn(bits, srcIndexToBit) } func TestAnd(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) + bA1 := randBitArray(51) + bA2 := randBitArray(31) bA3 := bA1.And(bA2) var bNil *BitArray @@ -53,9 +52,8 @@ } func TestOr(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) + bA1 := randBitArray(57) + bA2 := randBitArray(31) bA3 := bA1.Or(bA2) bNil := (*BitArray)(nil) @@ -63,7 +61,7 @@ require.Equal(t, bA1.Or(nil), bA1) require.Equal(t, bNil.Or(nil), (*BitArray)(nil)) - if bA3.Bits != 51 { + if bA3.Bits != 57 { t.Error("Expected max bits") } if len(bA3.Elems) != len(bA1.Elems) { @@ -75,6 +73,10 @@ t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) } } + if bA3.getNumTrueIndices() == 0 { + t.Error("Expected at least one true bit. 
" + + "This has a false positive rate that is less than 1 in 2^80 (cryptographically improbable).") + } } func TestSub(t *testing.T) { @@ -117,8 +119,6 @@ func TestSub(t *testing.T) { } func TestPickRandom(t *testing.T) { - empty16Bits := "________________" - empty64Bits := empty16Bits + empty16Bits + empty16Bits + empty16Bits testCases := []struct { bA string ok bool @@ -133,6 +133,7 @@ func TestPickRandom(t *testing.T) { {`"x` + empty64Bits + `"`, true}, {`"` + empty64Bits + `x"`, true}, {`"x` + empty64Bits + `x"`, true}, + {`"` + empty64Bits + `___x"`, true}, } for _, tc := range testCases { var bitArr *BitArray @@ -143,7 +144,89 @@ func TestPickRandom(t *testing.T) { } } -func TestBytes(t *testing.T) { +func TestGetNumTrueIndices(t *testing.T) { + type testcase struct { + Input string + ExpectedResult int + } + testCases := []testcase{ + {"x_x_x_", 3}, + {"______", 0}, + {"xxxxxx", 6}, + {"x_x_x_x_x_x_x_x_x_", 9}, + } + numOriginalTestCases := len(testCases) + for i := 0; i < numOriginalTestCases; i++ { + testCases = append(testCases, testcase{testCases[i].Input + "x", testCases[i].ExpectedResult + 1}) + testCases = append(testCases, testcase{full64bits + testCases[i].Input, testCases[i].ExpectedResult + 64}) + testCases = append(testCases, testcase{empty64Bits + testCases[i].Input, testCases[i].ExpectedResult}) + } + + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+tc.Input+`"`), &bitArr) + require.NoError(t, err) + result := bitArr.getNumTrueIndices() + require.Equal(t, tc.ExpectedResult, result, "for input %s, expected %d, got %d", tc.Input, tc.ExpectedResult, result) + result = bitArr.Not().getNumTrueIndices() + require.Equal(t, bitArr.Bits-result, bitArr.getNumTrueIndices()) + } +} + +func TestGetNthTrueIndex(t *testing.T) { + type testcase struct { + Input string + N int + ExpectedResult int + } + testCases := []testcase{ + // Basic cases + {"x_x_x_", 0, 0}, + {"x_x_x_", 1, 2}, + {"x_x_x_", 2, 4}, + {"______", 1, -1}, // No true indices + {"xxxxxx", 5, 5}, // Last true index + {"x_x_x_x_x_x_x_", 9, -1}, // Out-of-range + + // Edge cases + {"xxxxxx", 7, -1}, // Out-of-range + {"______", 0, -1}, // No true indices + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 49, 49}, // Last true index + {"____________________________________________", 1, -1}, // No true indices + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 63, 63}, // last index of first word + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 64, 64}, // first index of second word + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 100, -1}, // Out-of-range + + // Input beyond 64 bits + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 99, 99}, // Last true index + + // Input less than 64 bits + {"x_x_x_", 3, -1}, // Out-of-range + } + + numOriginalTestCases := len(testCases) + // Add 64 underscores to each test case + for i := 0; i < numOriginalTestCases; i++ { + expectedResult := testCases[i].ExpectedResult + if expectedResult != -1 { + expectedResult += 64 + } + testCases = append(testCases, testcase{empty64Bits + testCases[i].Input, testCases[i].N, expectedResult}) + } + + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+tc.Input+`"`), &bitArr) + 
require.NoError(t, err) + + // Get the nth true index + result := bitArr.getNthTrueIndex(tc.N) + + require.Equal(t, tc.ExpectedResult, result, "for bit array %s, input %d, expected %d, got %d", tc.Input, tc.N, tc.ExpectedResult, result) + } +} + +func TestBytes(_ *testing.T) { bA := NewBitArray(4) bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { @@ -188,9 +271,9 @@ func TestEmptyFull(t *testing.T) { } } -func TestUpdateNeverPanics(t *testing.T) { +func TestUpdateNeverPanics(_ *testing.T) { newRandBitArray := func(n int) *BitArray { - ba, _ := randBitArray(n) + ba := randBitArray(n) return ba } pairs := []struct { @@ -210,7 +293,7 @@ func TestUpdateNeverPanics(t *testing.T) { } } -func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { +func TestNewBitArrayNeverCrashesOnNegatives(_ *testing.T) { bitList := []int{-127, -128, -1 << 31} for _, bits := range bitList { _ = NewBitArray(bits) @@ -218,7 +301,6 @@ func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { } func TestJSONMarshalUnmarshal(t *testing.T) { - bA1 := NewBitArray(0) bA2 := NewBitArray(1) @@ -288,3 +370,30 @@ func TestBitArrayProtoBuf(t *testing.T) { } } } + +// Tests that UnmarshalJSON doesn't crash when no bits are passed into the JSON. +// See issue https://github.com/cometbft/cometbft/issues/2658 +func TestUnmarshalJSONDoesntCrashOnZeroBits(t *testing.T) { + type indexCorpus struct { + BitArray *BitArray `json:"ba"` + Index int `json:"i"` + } + + ic := new(indexCorpus) + blob := []byte(`{"BA":""}`) + err := json.Unmarshal(blob, ic) + require.NoError(t, err) + require.Equal(t, ic.BitArray, &BitArray{Bits: 0, Elems: nil}) +} + +func BenchmarkPickRandomBitArray(b *testing.B) { + // A random 150 bit string to use as the benchmark bit array + benchmarkBitArrayStr := "_______xx__xxx_xx__x_xx_x_x_x__x_x_x_xx__xx__xxx__xx_x_xxx_x__xx____x____xx__xx____x_x__x_____xx_xx_xxxxxxx__xx_x_xxxx_x___x_xxxxx_xx__xxxx_xx_x___x_x" + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+benchmarkBitArrayStr+`"`), &bitArr) + require.NoError(b, err) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = bitArr.PickRandom() + } +} diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index 95b4cc35fca..621016f8513 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -58,8 +58,8 @@ func (bz HexBytes) String() string { func (bz HexBytes) Format(s fmt.State, verb rune) { switch verb { case 'p': - s.Write([]byte(fmt.Sprintf("%p", bz))) + s.Write([]byte(fmt.Sprintf("%p", bz))) //nolint: errcheck default: - s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) //nolint: errcheck } } diff --git a/libs/cli/setup.go b/libs/cli/setup.go index 521695bdbd4..9154fa9860c 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -125,7 +125,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } // Bind all flags and read the config into viper -func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { +func bindFlagsLoadViper(cmd *cobra.Command, _ []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err @@ -138,17 +138,15 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config // If a config file is found, read it in. 
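The hunk just below flattens this read-then-ignore-missing-config logic. As a hedged, self-contained sketch of the same pattern (the loadConfig name and paths here are ours for illustration, not from the change):

    package main

    import (
        "log"

        "github.com/spf13/viper"
    )

    // loadConfig reads an optional config file: a missing file is fine
    // (defaults, flags, and env still apply); any other error is real.
    func loadConfig() error {
        viper.SetConfigName("config")
        viper.AddConfigPath(".")

        err := viper.ReadInConfig()
        if _, ok := err.(viper.ConfigFileNotFoundError); ok {
            return nil // no config file found: not an error
        }
        return err // nil on success; unreadable or malformed file otherwise
    }

    func main() {
        if err := loadConfig(); err != nil {
            log.Fatal(err)
        }
    }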
- if err := viper.ReadInConfig(); err == nil { - // stderr, so if we redirect output to json file, this doesn't appear - // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) - } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + err := viper.ReadInConfig() + if _, ok := err.(viper.ConfigFileNotFoundError); !ok { // ignore not found error, return other errors return err } return nil } -func validateOutput(cmd *cobra.Command, args []string) error { +func validateOutput(_ *cobra.Command, _ []string) error { // validate output format output := viper.GetString(OutputFlag) switch output { diff --git a/libs/clist/clist.go b/libs/clist/clist.go index b18306490f9..5eb48f00a12 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -223,7 +223,7 @@ type CList struct { waitCh chan struct{} head *CElement // first element tail *CElement // last element - len int // list length + curLen int // list length maxLen int // max list length } @@ -234,7 +234,7 @@ func (l *CList) Init() *CList { l.waitCh = make(chan struct{}) l.head = nil l.tail = nil - l.len = 0 + l.curLen = 0 l.mtx.Unlock() return l } @@ -252,9 +252,9 @@ func newWithMax(maxLength int) *CList { func (l *CList) Len() int { l.mtx.RLock() - len := l.len + curLen := l.curLen l.mtx.RUnlock() - return len + return curLen } func (l *CList) Front() *CElement { @@ -329,14 +329,14 @@ func (l *CList) PushBack(v interface{}) *CElement { } // Release waiters on FrontWait/BackWait maybe - if l.len == 0 { + if l.curLen == 0 { l.wg.Done() close(l.waitCh) } - if l.len >= l.maxLen { + if l.curLen >= l.maxLen { panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen)) } - l.len++ + l.curLen++ // Modify the tail if l.tail == nil { @@ -373,13 +373,13 @@ func (l *CList) Remove(e *CElement) interface{} { } // If we're removing the only item, make CList FrontWait/BackWait wait. - if l.len == 1 { + if l.curLen == 1 { l.wg = waitGroup1() // WaitGroups are difficult to re-use. l.waitCh = make(chan struct{}) } // Update l.len - l.len-- + l.curLen-- // Connect next/prev and set head/tail if prev == nil { diff --git a/libs/json/encoder.go b/libs/json/encoder.go index 11990e2af6c..67112384fdb 100644 --- a/libs/json/encoder.go +++ b/libs/json/encoder.go @@ -42,7 +42,7 @@ func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { return buf.Bytes(), nil } -func encode(w io.Writer, v interface{}) error { +func encode(w *bytes.Buffer, v any) error { // Bare nil values can't be reflected, so we must handle them here. if v == nil { return writeStr(w, "null") @@ -60,7 +60,7 @@ func encode(w io.Writer, v interface{}) error { return encodeReflect(w, rv) } -func encodeReflect(w io.Writer, rv reflect.Value) error { +func encodeReflect(w *bytes.Buffer, rv reflect.Value) error { if !rv.IsValid() { return errors.New("invalid reflect value") } @@ -115,7 +115,7 @@ func encodeReflect(w io.Writer, rv reflect.Value) error { } } -func encodeReflectList(w io.Writer, rv reflect.Value) error { +func encodeReflectList(w *bytes.Buffer, rv reflect.Value) error { // Emit nil slices as null. 
if rv.Kind() == reflect.Slice && rv.IsNil() { return writeStr(w, "null") @@ -150,7 +150,7 @@ func encodeReflectList(w io.Writer, rv reflect.Value) error { return writeStr(w, "]") } -func encodeReflectMap(w io.Writer, rv reflect.Value) error { +func encodeReflectMap(w *bytes.Buffer, rv reflect.Value) error { if rv.Type().Key().Kind() != reflect.String { return errors.New("map key must be string") } @@ -181,7 +181,7 @@ func encodeReflectMap(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeReflectStruct(w io.Writer, rv reflect.Value) error { +func encodeReflectStruct(w *bytes.Buffer, rv reflect.Value) error { sInfo := makeStructInfo(rv.Type()) if err := writeStr(w, "{"); err != nil { return err @@ -212,7 +212,7 @@ func encodeReflectStruct(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeReflectInterface(w io.Writer, rv reflect.Value) error { +func encodeReflectInterface(w *bytes.Buffer, rv reflect.Value) error { // Get concrete value and dereference pointers. for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { if rv.IsNil() { @@ -237,14 +237,17 @@ func encodeReflectInterface(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeStdlib(w io.Writer, v interface{}) error { - // Doesn't stream the output because that adds a newline, as per: - // https://golang.org/pkg/encoding/json/#Encoder.Encode - blob, err := json.Marshal(v) +func encodeStdlib(w *bytes.Buffer, v any) error { + // Stream the output of the JSON marshaling directly into the buffer. + // The stdlib encoder will write a newline, so we must truncate it, + // which is why we pass in a bytes.Buffer throughout, not io.Writer. + enc := json.NewEncoder(w) + err := enc.Encode(v) if err != nil { return err } - _, err = w.Write(blob) + // Remove the last byte from the buffer + w.Truncate(w.Len() - 1) return err } diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go index e6eb18a1225..8cf536b26be 100644 --- a/libs/json/encoder_test.go +++ b/libs/json/encoder_test.go @@ -102,3 +102,20 @@ func TestMarshal(t *testing.T) { }) } } + +func BenchmarkJsonMarshalStruct(b *testing.B) { + s := "string" + sPtr := &s + i64 := int64(64) + ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) + car := &Car{Wheels: 4} + boat := Boat{Sail: true} + for i := 0; i < b.N; i++ { + _, _ = json.Marshal(Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, + Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, + Child: &Struct{Bool: false, String: "child"}, private: "private", + }) + } +} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index 46d5b471b45..1776c9723cb 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -33,8 +33,10 @@ type Boat struct { func (b Boat) Drive() error { return nil } // These are public and private encryption keys. -type PublicKey [8]byte -type PrivateKey [8]byte +type ( + PublicKey [8]byte + PrivateKey [8]byte +) // Custom has custom marshalers and unmarshalers, taking pointer receivers. 
type CustomPtr struct { @@ -45,7 +47,7 @@ func (c *CustomPtr) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } -func (c *CustomPtr) UnmarshalJSON(bz []byte) error { +func (c *CustomPtr) UnmarshalJSON(_ []byte) error { c.Value = "custom" return nil } @@ -60,7 +62,7 @@ func (c CustomValue) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } -func (c CustomValue) UnmarshalJSON(bz []byte) error { +func (c CustomValue) UnmarshalJSON(_ []byte) error { return nil } diff --git a/libs/os/os.go b/libs/os/os.go index 334eaf4c896..cfab38f48c2 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -41,7 +41,7 @@ func Kill() error { } func Exit(s string) { - fmt.Printf(s + "\n") + fmt.Println(s) os.Exit(1) } diff --git a/libs/protoio/io.go b/libs/protoio/io.go index 6244afd97b6..7de9dad5685 100644 --- a/libs/protoio/io.go +++ b/libs/protoio/io.go @@ -67,9 +67,8 @@ func getSize(v interface{}) (int, bool) { ProtoSize() (n int) }); ok { return sz.ProtoSize(), true - } else { - return 0, false } + return 0, false } // byteReader wraps an io.Reader and implements io.ByteReader, required by @@ -97,3 +96,7 @@ func (r *byteReader) ReadByte() (byte, error) { } return r.buf[0], nil } + +func (r *byteReader) resetBytesRead() { + r.bytesRead = 0 +} diff --git a/libs/protoio/io_test.go b/libs/protoio/io_test.go index c6d3c10654f..b95c187df0f 100644 --- a/libs/protoio/io_test.go +++ b/libs/protoio/io_test.go @@ -97,10 +97,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { if i != size { panic("not enough messages read") } - if err := reader.Close(); err != nil { - return err - } - return nil + return reader.Close() } type buffer struct { diff --git a/libs/protoio/reader.go b/libs/protoio/reader.go index 95b8d345585..054a114df8b 100644 --- a/libs/protoio/reader.go +++ b/libs/protoio/reader.go @@ -49,24 +49,25 @@ func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { if c, ok := r.(io.Closer); ok { closer = c } - return &varintReader{r, nil, maxSize, closer} + return &varintReader{r, newByteReader(r), nil, maxSize, closer} } type varintReader struct { - r io.Reader - buf []byte - maxSize int - closer io.Closer -} - -func (r *varintReader) ReadMsg(msg proto.Message) (int, error) { + r io.Reader // ReadUvarint needs an io.ByteReader, and we also need to keep track of the // number of bytes read, so we use our own byteReader. This can't be // buffered, so the caller should pass a buffered io.Reader to avoid poor // performance. - byteReader := newByteReader(r.r) - l, err := binary.ReadUvarint(byteReader) - n := byteReader.bytesRead + byteReader *byteReader + buf []byte + maxSize int + closer io.Closer +} + +func (r *varintReader) ReadMsg(msg proto.Message) (int, error) { + r.byteReader.resetBytesRead() + l, err := binary.ReadUvarint(r.byteReader) + n := r.byteReader.bytesRead if err != nil { return n, err } diff --git a/libs/protoio/writer.go b/libs/protoio/writer.go index 0eb65850cfd..d1f6f03d1bc 100644 --- a/libs/protoio/writer.go +++ b/libs/protoio/writer.go @@ -42,7 +42,7 @@ import ( // equivalent to the gogoproto NewDelimitedWriter, except WriteMsg() also returns the // number of bytes written, which is necessary in the p2p package. 
func NewDelimitedWriter(w io.Writer) WriteCloser { - return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil} + return &varintWriter{w, nil, nil} } type varintWriter struct { @@ -69,6 +69,9 @@ func (w *varintWriter) WriteMsg(msg proto.Message) (int, error) { } // fallback + if w.lenBuf == nil { + w.lenBuf = make([]byte, binary.MaxVarintLen64) + } data, err := proto.Marshal(msg) if err != nil { return 0, err diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index e1db675adc1..b7cc180a72c 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -10,8 +10,8 @@ package query import ( "fmt" + "math/big" "regexp" - "strconv" "strings" "time" @@ -218,13 +218,23 @@ func compileCondition(cond syntax.Condition) (condition, error) { return out, nil } -// TODO(creachadair): The existing implementation allows anything number shaped -// to be treated as a number. This preserves the parts of that behavior we had -// tests for, but we should probably get rid of that. +// We use this regex to support queries of the form "8atom", "6.5stake", +// which are actively used in production. +// The regex takes care of removing the non-number suffix. var extractNum = regexp.MustCompile(`^\d+(\.\d+)?`) -func parseNumber(s string) (float64, error) { - return strconv.ParseFloat(extractNum.FindString(s), 64) +func parseNumber(s string) (*big.Float, error) { + intVal := new(big.Int) + if _, ok := intVal.SetString(s, 10); !ok { + f, _, err := big.ParseFloat(extractNum.FindString(s), 10, 125, big.ToNearestEven) + if err != nil { + return nil, err + } + return f, err + } + f, _, err := big.ParseFloat(extractNum.FindString(s), 10, uint(intVal.BitLen()), big.ToNearestEven) + return f, err + } // A map of operator ⇒ argtype ⇒ match-constructor. 
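A note on the parseNumber change above: switching the return type from float64 to *big.Float is what lets queries over values wider than 53 bits compare correctly. The standalone sketch below (an editor's illustration, not part of the patch) reproduces the failure mode being fixed: float64 cannot distinguish max(uint64) from max(uint64)+1, while big.Float parsed at 64-bit precision can, which is why the matchers that follow use Cmp instead of ==, <, <=, >, and >=.

package main

import (
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	// Both decimal literals round to the same float64 (2^64), so they compare equal.
	a, _ := strconv.ParseFloat("18446744073709551615", 64) // max(uint64)
	b, _ := strconv.ParseFloat("18446744073709551616", 64) // max(uint64)+1
	fmt.Println(a == b) // true: a 53-bit mantissa cannot tell them apart

	// big.Float with 64 bits of precision keeps the two values distinct.
	x, _, _ := big.ParseFloat("18446744073709551615", 10, 64, big.ToNearestEven)
	y, _, _ := big.ParseFloat("18446744073709551616", 10, 64, big.ToNearestEven)
	fmt.Println(x.Cmp(y)) // -1: x is strictly smaller than y
}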
@@ -248,7 +258,7 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string)
 		syntax.TNumber: func(v interface{}) func(string) bool {
 			return func(s string) bool {
 				w, err := parseNumber(s)
-				return err == nil && w == v.(float64)
+				return err == nil && w.Cmp(v.(*big.Float)) == 0
 			}
 		},
 		syntax.TDate: func(v interface{}) func(string) bool {
@@ -268,7 +278,7 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string)
 		syntax.TNumber: func(v interface{}) func(string) bool {
 			return func(s string) bool {
 				w, err := parseNumber(s)
-				return err == nil && w < v.(float64)
+				return err == nil && w.Cmp(v.(*big.Float)) < 0
 			}
 		},
 		syntax.TDate: func(v interface{}) func(string) bool {
@@ -288,7 +298,7 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string)
 		syntax.TNumber: func(v interface{}) func(string) bool {
 			return func(s string) bool {
 				w, err := parseNumber(s)
-				return err == nil && w <= v.(float64)
+				return err == nil && w.Cmp(v.(*big.Float)) <= 0
 			}
 		},
 		syntax.TDate: func(v interface{}) func(string) bool {
@@ -308,7 +318,7 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string)
 		syntax.TNumber: func(v interface{}) func(string) bool {
 			return func(s string) bool {
 				w, err := parseNumber(s)
-				return err == nil && w > v.(float64)
+				return err == nil && w.Cmp(v.(*big.Float)) > 0
 			}
 		},
 		syntax.TDate: func(v interface{}) func(string) bool {
@@ -328,7 +338,7 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string)
 		syntax.TNumber: func(v interface{}) func(string) bool {
 			return func(s string) bool {
 				w, err := parseNumber(s)
-				return err == nil && w >= v.(float64)
+				return err == nil && w.Cmp(v.(*big.Float)) >= 0
 			}
 		},
 		syntax.TDate: func(v interface{}) func(string) bool {
diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go
index 68b270895ce..2c8fcc557ed 100644
--- a/libs/pubsub/query/query_test.go
+++ b/libs/pubsub/query/query_test.go
@@ -173,6 +173,84 @@ var apiTypeEvents = []types.Event{
 	},
 }
 
+func TestBigNumbers(t *testing.T) {
+
+	apiBigNumTest := map[string][]string{
+		"big.value": {
+			"99999999999999999999",
+		},
+		"big2.value": {
+			"18446744073709551615", // max(uint64) == 18446744073709551615
+		},
+		"big.floatvalue": {
+			"99999999999999999999.10",
+		},
+		"big2.floatvalue": {
+			"18446744073709551615.6", // max(uint64) == 18446744073709551615
+		},
+	}
+
+	testCases := []struct {
+		s       string
+		events  map[string][]string
+		matches bool
+	}{
+
+		// Test cases for values that exceed the capacity of int64/float64.
+ {`big.value >= 99999999999999999999`, + apiBigNumTest, + true}, + {`big.value > 99999999999999999998`, + apiBigNumTest, + true}, + {`big2.value <= 18446744073709551615`, + apiBigNumTest, true}, + {`big.floatvalue >= 99999999999999999999`, + apiBigNumTest, + true}, + {`big.floatvalue > 99999999999999999998.10`, + apiBigNumTest, + true}, + {`big.floatvalue > 99999999999999999998`, + apiBigNumTest, + true}, + {`big2.floatvalue <= 18446744073709551615.6`, + apiBigNumTest, + true}, + {`big2.floatvalue <= 18446744073709551615.6`, + apiBigNumTest, + true}, + {`big2.floatvalue >= 18446744073709551615`, + apiBigNumTest, + true}, + {`big2.floatvalue >= 12.5`, + apiBigNumTest, + true}, + {`big.value >= 10`, + apiBigNumTest, + true}, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("%02d", i+1), func(t *testing.T) { + c, err := query.New(tc.s) + if err != nil { + t.Fatalf("NewCompiled %#q: unexpected error: %v", tc.s, err) + } + + got, err := c.Matches(tc.events) + if err != nil { + t.Errorf("Query: %#q\nInput: %+v\nMatches: got error %v", + tc.s, tc.events, err) + } + if got != tc.matches { + t.Errorf("Query: %#q\nInput: %+v\nMatches: got %v, want %v", + tc.s, tc.events, got, tc.matches) + } + }) + } +} + func TestCompiledMatches(t *testing.T) { var ( txDate = "2017-01-01" diff --git a/libs/pubsub/query/syntax/doc.go b/libs/pubsub/query/syntax/doc.go index b9fb1afede2..e60423abfcc 100644 --- a/libs/pubsub/query/syntax/doc.go +++ b/libs/pubsub/query/syntax/doc.go @@ -17,7 +17,7 @@ // The lexical terms are defined here using RE2 regular expression notation: // // // The name of an event attribute (type.value) -// tag = #'\w+(\.\w+)*' +// tag = #`^[\w]+[\.-\w]?$` // // // A datestamp (YYYY-MM-DD) // date = #'DATE \d{4}-\d{2}-\d{2}' diff --git a/libs/pubsub/query/syntax/parser.go b/libs/pubsub/query/syntax/parser.go index a100ec79c73..26c8554908a 100644 --- a/libs/pubsub/query/syntax/parser.go +++ b/libs/pubsub/query/syntax/parser.go @@ -3,8 +3,7 @@ package syntax import ( "fmt" "io" - "math" - "strconv" + "math/big" "strings" "time" ) @@ -68,17 +67,35 @@ func (a *Arg) String() string { } } -// Number returns the value of the argument text as a number, or a NaN if the +// Number returns the value of the argument text as a number, or nil if the // text does not encode a valid number value. 
-func (a *Arg) Number() float64 { +func (a *Arg) Number() *big.Float { if a == nil { - return -1 + return nil } - v, err := strconv.ParseFloat(a.text, 64) - if err == nil && v >= 0 { - return v + intVal := new(big.Int) + if _, ok := intVal.SetString(a.text, 10); !ok { + f, _, err := big.ParseFloat(a.text, 10, 125, big.ToNearestEven) + if err != nil { + return nil + } + return f + } + // If it is indeed a big integer, we make sure to convert it to a float with enough precision + // to represent all the bits + bitLen := uint(intVal.BitLen()) + var f *big.Float + var err error + if bitLen <= 64 { + f, _, err = big.ParseFloat(a.text, 10, 0, big.ToNearestEven) + } else { + f, _, err = big.ParseFloat(a.text, 10, bitLen, big.ToNearestEven) + } + if err != nil { + return nil } - return math.NaN() + return f + } // Time returns the value of the argument text as a time, or the zero value if diff --git a/libs/pubsub/query/syntax/scanner.go b/libs/pubsub/query/syntax/scanner.go index 332e3f7b145..15da354c273 100644 --- a/libs/pubsub/query/syntax/scanner.go +++ b/libs/pubsub/query/syntax/scanner.go @@ -99,7 +99,7 @@ func (s *Scanner) Next() error { } if '0' <= ch && ch <= '9' { return s.scanNumber(ch) - } else if isTagRune(ch) { + } else if isFirstTagRune(ch) { return s.scanTagLike(ch) } switch ch { @@ -302,7 +302,11 @@ func (s *Scanner) invalid(ch rune) error { func isDigit(r rune) bool { return '0' <= r && r <= '9' } func isTagRune(r rune) bool { - return r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) + return r == '.' || r == '_' || r == '-' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +func isFirstTagRune(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) } func isTimeRune(r rune) bool { diff --git a/libs/pubsub/query/syntax/syntax_test.go b/libs/pubsub/query/syntax/syntax_test.go index 29a85aa9ec6..a097500ff7a 100644 --- a/libs/pubsub/query/syntax/syntax_test.go +++ b/libs/pubsub/query/syntax/syntax_test.go @@ -25,6 +25,8 @@ func TestScanner(t *testing.T) { // Tags {`foo foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + {`foo foo-foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + {`foo foo._bar_bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, // Strings (values) {` '' x 'x' 'x y'`, []syntax.Token{syntax.TString, syntax.TTag, syntax.TString, syntax.TString}}, @@ -167,6 +169,8 @@ func TestParseValid(t *testing.T) { {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + + {"cosm-wasm.transfer_amount=100", true}, } for _, test := range tests { diff --git a/libs/rand/random.go b/libs/rand/random.go index 73f56b76224..053e03e15e8 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -164,13 +164,12 @@ MAIN_LOOP: if v >= 62 { // only 62 characters in strChars val >>= 6 continue - } else { - chars = append(chars, strChars[v]) - if len(chars) == length { - break MAIN_LOOP - } - val >>= 6 } + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 } } diff --git a/libs/rand/random_test.go b/libs/rand/random_test.go index 10bb601b5e7..ec4aa327185 100644 --- a/libs/rand/random_test.go +++ b/libs/rand/random_test.go @@ -68,7 +68,7 @@ func testThemAll() string { return out.String() } -func TestRngConcurrencySafety(t *testing.T) { +func TestRngConcurrencySafety(_ *testing.T) { var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) diff --git a/libs/strings/string.go b/libs/strings/string.go index 37026dcc208..f012d761b0e 100644 --- 
a/libs/strings/string.go +++ b/libs/strings/string.go @@ -59,9 +59,7 @@ func IsASCIIText(s string) bool { return false } for _, b := range []byte(s) { - if 32 <= b && b <= 126 { - // good - } else { + if b < 32 || b > 126 { return false } } diff --git a/light/client.go b/light/client.go index d155c993f1e..b9153b4d5fe 100644 --- a/light/client.go +++ b/light/client.go @@ -178,8 +178,8 @@ func NewClient( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { if err := trustOptions.ValidateBasic(); err != nil { return nil, fmt.Errorf("invalid TrustOptions: %w", err) } @@ -215,8 +215,8 @@ func NewClientFromTrustedStore( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { c := &Client{ chainID: chainID, trustingPeriod: trustingPeriod, @@ -384,7 +384,7 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp } // 3) Cross-verify with witnesses to ensure everybody has the same state. - if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { + if err := c.compareFirstLightBlockWithWitnesses(ctx, l); err != nil { return err } @@ -506,7 +506,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // headers are not adjacent, verifySkipping is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. // Intermediate headers are not saved to database. -// https://github.com/cometbft/cometbft/blob/main/spec/consensus/light-client.md +// https://github.com/cometbft/cometbft/blob/v0.38.x/spec/consensus/light-client.md // // If the header, which is older than the currently trusted header, is // requested and the light client does not have it, VerifyHeader will perform: @@ -614,8 +614,8 @@ func (c *Client) verifySequential( ctx context.Context, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) error { - + now time.Time, +) error { var ( verifiedBlock = trustedBlock interimBlock *types.LightBlock @@ -708,8 +708,8 @@ func (c *Client) verifySkipping( source provider.Provider, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) ([]*types.LightBlock, error) { - + now time.Time, +) ([]*types.LightBlock, error) { var ( blockCache = []*types.LightBlock{newLightBlock} depth = 0 @@ -778,8 +778,8 @@ func (c *Client) verifySkippingAgainstPrimary( ctx context.Context, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) error { - + now time.Time, +) error { trace, err := c.verifySkipping(ctx, c.primary, trustedBlock, newLightBlock, now) switch errors.Unwrap(err).(type) { @@ -933,8 +933,8 @@ func (c *Client) updateTrustedLightBlock(l *types.LightBlock) error { func (c *Client) backwards( ctx context.Context, trustedHeader *types.Header, - newHeader *types.Header) error { - + newHeader *types.Header, +) error { var ( verifiedHeader = trustedHeader interimHeader *types.Header @@ -1126,9 +1126,9 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) return nil, lastError } -// compareFirstHeaderWithWitnesses compares h with all witnesses. If any +// compareFirstLightBlockWithWitnesses compares light block l with all witnesses. If any // witness reports a different header than h, the function returns an error. 
-func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.SignedHeader) error { +func (c *Client) compareFirstLightBlockWithWitnesses(ctx context.Context, l *types.LightBlock) error { compareCtx, cancel := context.WithCancel(ctx) defer cancel() @@ -1141,7 +1141,7 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S errc := make(chan error, len(c.witnesses)) for i, witness := range c.witnesses { - go c.compareNewHeaderWithWitness(compareCtx, errc, h, witness, i) + go c.compareNewLightBlockWithWitness(compareCtx, errc, l, witness, i) } witnessesToRemove := make([]int, 0, len(c.witnesses)) @@ -1153,23 +1153,29 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S switch e := err.(type) { case nil: continue - case errConflictingHeaders: - c.logger.Error(fmt.Sprintf(`Witness #%d has a different header. Please check primary is correct -and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex]) + case ErrConflictingHeaders: + c.logger.Error("Witness reports a conflicting header. "+ + "Please check if the primary is correct or use a different witness.", + "witness", c.witnesses[e.WitnessIndex], "err", err) return err case errBadWitness: // If witness sent us an invalid header, then remove it - c.logger.Info("witness sent an invalid light block, removing...", + c.logger.Info("Witness sent an invalid light block, removing...", "witness", c.witnesses[e.WitnessIndex], "err", err) witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + case ErrProposerPrioritiesDiverge: + c.logger.Error("Witness reports conflicting proposer priorities. "+ + "Please check if the primary is correct or use a different witness.", + "witness", c.witnesses[e.WitnessIndex], "err", err) + return err default: // benign errors can be ignored with the exception of context errors if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return err } // the witness either didn't respond or didn't have the block. We ignore it. - c.logger.Info("error comparing first header with witness. You may want to consider removing the witness", + c.logger.Info("Error comparing first header with witness. You may want to consider removing the witness", "err", err) } @@ -1177,7 +1183,7 @@ and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "wit // remove witnesses that have misbehaved if err := c.removeWitnesses(witnessesToRemove); err != nil { - c.logger.Error("failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) + c.logger.Error("Failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) } return nil diff --git a/light/client_test.go b/light/client_test.go index 6e975212d62..24c0dbe8695 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -32,11 +32,13 @@ var ( bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - // 3/3 signed - h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + // 3/3 signed. 
+ vals2 = vals.CopyIncrementProposerPriority(1) + h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) - // 3/3 signed - h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, + // 3/3 signed. + vals3 = vals2.CopyIncrementProposerPriority(1) + h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals3, vals3, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour trustOptions = light.TrustOptions{ @@ -46,9 +48,9 @@ var ( } valSet = map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, - 3: vals, - 4: vals, + 2: vals2, + 3: vals3, + 4: vals.CopyIncrementProposerPriority(1), } headerSet = map[int64]*types.SignedHeader{ 1: h1, @@ -58,7 +60,7 @@ var ( 3: h3, } l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} - l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals2} fullNode = mockp.New( chainID, headerSet, @@ -913,13 +915,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { chainID, map[int64]*types.SignedHeader{ 1: h1, - 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), }, map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, + 2: vals2, }, ) // header is empty @@ -931,7 +933,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { }, map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, + 2: vals2, }, ) @@ -1155,3 +1157,56 @@ func TestClientHandlesContexts(t *testing.T) { require.Error(t, err) require.True(t, errors.Is(err, context.Canceled)) } + +// TestClientErrorsDifferentProposerPriorities tests the case where the witness +// sends us a light block with a validator set with different proposer priorities. +func TestClientErrorsDifferentProposerPriorities(t *testing.T) { + primary := mockp.New( + chainID, + map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals2, + }, + ) + witness := mockp.New( + chainID, + map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + }, + ) + + // Proposer priorities in vals and vals2 are different. 
+ // This is because vals2 = vals.CopyIncrementProposerPriority(1) + require.Equal(t, vals.Hash(), vals2.Hash()) + require.NotEqual(t, vals.ProposerPriorityHash(), vals2.ProposerPriorityHash()) + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + fullNode, + []provider.Provider{primary, witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + ) + // witness should have behaved properly -> no error + require.NoError(t, err) + assert.EqualValues(t, 2, len(c.Witnesses())) + + // witness behaves incorrectly, but we can't prove who's guilty -> error + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) + require.Error(t, err) + + // witness left in the list + assert.EqualValues(t, 2, len(c.Witnesses())) +} diff --git a/light/detector.go b/light/detector.go index 228dec61a6c..5b742761eba 100644 --- a/light/detector.go +++ b/light/detector.go @@ -31,7 +31,8 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig } var ( headerMatched bool - lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader + lastVerifiedBlock = primaryTrace[len(primaryTrace)-1] + lastVerifiedHeader = lastVerifiedBlock.SignedHeader witnessesToRemove = make([]int, 0) ) c.logger.Debug("Running detector against trace", "finalizeBlockHeight", lastVerifiedHeader.Height, @@ -48,7 +49,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig // and compare it with the header from the primary errc := make(chan error, len(c.witnesses)) for i, witness := range c.witnesses { - go c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i) + go c.compareNewLightBlockWithWitness(ctx, errc, lastVerifiedBlock, witness, i) } // handle errors from the header comparisons as they come in @@ -58,7 +59,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig switch e := err.(type) { case nil: // at least one header matched headerMatched = true - case errConflictingHeaders: + case ErrConflictingHeaders: // We have conflicting headers. This could possibly imply an attack on the light client. // First we need to verify the witness's header using the same skipping verification and then we // need to find the point that the headers diverge and examine this for any evidence of an attack. @@ -79,6 +80,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig c.logger.Info("witness returned an error during header comparison, removing...", "witness", c.witnesses[e.WitnessIndex], "err", err) witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + case ErrProposerPrioritiesDiverge: + c.logger.Info("witness reported validator set with different proposer priorities", + "witness", c.witnesses[e.WitnessIndex], "err", err) + return e default: // Benign errors which can be ignored unless there was a context // canceled @@ -104,17 +109,19 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig return ErrFailedHeaderCrossReferencing } -// compareNewHeaderWithWitness takes the verified header from the primary and compares it with a +// compareNewLightBlockWithWitness takes the verified header from the primary and compares it with a // header from a specified witness. 
The function can return one of three errors: // -// 1: errConflictingHeaders -> there may have been an attack on this light client +// 1: ErrConflictingHeaders -> there may have been an attack on this light client // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one // // Note: In the case of an invalid header we remove the witness // // 3: nil -> the hashes of the two headers match -func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, - witness provider.Provider, witnessIndex int) { +func (c *Client) compareNewLightBlockWithWitness(ctx context.Context, errc chan error, l *types.LightBlock, + witness provider.Provider, witnessIndex int, +) { + h := l.SignedHeader lightBlock, err := witness.LightBlock(ctx, h.Height) switch err { @@ -150,7 +157,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro // witness' last header is below the primary's header. We check the times to see if the blocks // have conflicting times if !lightBlock.Time.Before(h.Time) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} return } @@ -175,7 +182,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro // the witness still doesn't have a block at the height of the primary. // Check if there is a conflicting time if !lightBlock.Time.Before(h.Time) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} return } @@ -197,7 +204,13 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro } if !bytes.Equal(h.Hash(), lightBlock.Hash()) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + } + + // ProposerPriorityHash is not part of the header hash, so we need to check it separately. + wanted, got := l.ValidatorSet.ProposerPriorityHash(), lightBlock.ValidatorSet.ProposerPriorityHash() + if !bytes.Equal(wanted, got) { + errc <- ErrProposerPrioritiesDiverge{WitnessHash: got, WitnessIndex: witnessIndex, PrimaryHash: wanted} } c.logger.Debug("Matching header received by witness", "height", h.Height, "witness", witnessIndex) @@ -245,7 +258,7 @@ func (c *Client) handleConflictingHeaders( if primaryBlock.Commit.Round != witnessTrace[len(witnessTrace)-1].Commit.Round { c.logger.Info("The light client has detected, and prevented, an attempted amnesia attack." + " We think this attack is pretty unlikely, so if you see it, that's interesting to us." + - " Can you let us know by opening an issue through https://github.com/tendermint/tendermint/issues/new?") + " Can you let us know by opening an issue through https://github.com/cometbft/cometbft/issues/new?") } // This may not be valid because the witness itself is at fault. So now we reverse it, examining the diff --git a/light/doc.go b/light/doc.go index 3cc2741bf05..9b1ba62d165 100644 --- a/light/doc.go +++ b/light/doc.go @@ -94,7 +94,7 @@ Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. 
See
-https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/README.md
+https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/README.md
 for details.
 
 There are two methods of verification: sequential and bisection
@@ -118,10 +118,10 @@ as a wrapper, which verifies all the headers, using a light client connected to
 some other node.
 
 See
-https://docs.cometbft.com/main/core/light-client.html
+https://docs.cometbft.com/v0.38.x/core/light-client.html
 for usage example.
 Or see
-https://github.com/cometbft/cometbft/tree/main/spec/consensus/light-client
+https://github.com/cometbft/cometbft/tree/v0.38.x/spec/consensus/light-client
 for the full spec
 */
 package light
diff --git a/light/errors.go b/light/errors.go
index bc6357def66..066f709c654 100644
--- a/light/errors.go
+++ b/light/errors.go
@@ -75,20 +75,36 @@ var ErrLightClientAttack = errors.New(`attempted attack detected.
 // continue running the light client.
 var ErrNoWitnesses = errors.New("no witnesses connected. please reset light client")
 
-// ----------------------------- INTERNAL ERRORS ---------------------------------
-
 // ErrConflictingHeaders is thrown when two conflicting headers are discovered.
-type errConflictingHeaders struct {
+type ErrConflictingHeaders struct {
 	Block        *types.LightBlock
 	WitnessIndex int
 }
 
-func (e errConflictingHeaders) Error() string {
+func (e ErrConflictingHeaders) Error() string {
 	return fmt.Sprintf(
 		"header hash (%X) from witness (%d) does not match primary",
 		e.Block.Hash(), e.WitnessIndex)
 }
 
+// ErrProposerPrioritiesDiverge is thrown when two conflicting headers are
+// discovered, but the error is non-attributable, in contrast to ErrConflictingHeaders.
+// The difference is in validator set proposer priorities, which may change
+// with every round of consensus.
+type ErrProposerPrioritiesDiverge struct {
+	WitnessHash  []byte
+	WitnessIndex int
+	PrimaryHash  []byte
+}
+
+func (e ErrProposerPrioritiesDiverge) Error() string {
+	return fmt.Sprintf(
+		"validator set's proposer priority hashes do not match: witness[%d]=%X, primary=%X",
+		e.WitnessIndex, e.WitnessHash, e.PrimaryHash)
+}
+
+// ----------------------------- INTERNAL ERRORS ---------------------------------
+
 // errBadWitness is returned when the witness either does not respond or
 // responds with an invalid header.
 type errBadWitness struct {
diff --git a/light/provider/http/http.go b/light/provider/http/http.go
index 9fb01dd96c1..b73f7bd4524 100644
--- a/light/provider/http/http.go
+++ b/light/provider/http/http.go
@@ -180,6 +180,15 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe
 	commit, err := p.client.Commit(ctx, height)
 	switch {
 	case err == nil:
+		// See https://github.com/cometbft/cometbft/issues/575
+		// If the node is starting at a non-zero height, but does not yet
+		// have any blocks, it can return an empty signed header without
+		// returning an error.
+		if commit.SignedHeader.IsEmpty() {
+			// Technically this means that the provider still needs to
+			// catch up.
+ return nil, provider.ErrHeightTooHigh + } return &commit.SignedHeader, nil case regexpTooHigh.MatchString(err.Error()): diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go index 8e388107380..789cc255fd9 100644 --- a/light/provider/mock/deadmock.go +++ b/light/provider/mock/deadmock.go @@ -20,10 +20,10 @@ func (p *deadMock) ChainID() string { return p.chainID } func (p *deadMock) String() string { return "deadMock" } -func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { +func (p *deadMock) LightBlock(context.Context, int64) (*types.LightBlock, error) { return nil, provider.ErrNoResponse } -func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error { +func (p *deadMock) ReportEvidence(context.Context, types.Evidence) error { return provider.ErrNoResponse } diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 5699dfe4395..dc3792941c2 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -21,6 +21,10 @@ type LightClient struct { func (_m *LightClient) ChainID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -35,6 +39,10 @@ func (_m *LightClient) ChainID() string { func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for TrustedLightBlock") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.LightBlock, error)); ok { @@ -61,6 +69,10 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, now) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, time.Time) (*types.LightBlock, error)); ok { @@ -87,6 +99,10 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, height, now) + if len(ret) == 0 { + panic("no return value specified for VerifyLightBlockAtHeight") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) (*types.LightBlock, error)); ok { @@ -109,13 +125,12 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } -type mockConstructorTestingTNewLightClient interface { +// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLightClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient { +}) *LightClient { mock := &LightClient{} mock.Mock.Test(t) diff --git a/mempool/bench_test.go b/mempool/bench_test.go index 1ddd59209e9..1e4b6d29970 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -1,12 +1,12 @@ package mempool import ( - "encoding/binary" "sync/atomic" "testing" "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/proxy" + "github.com/stretchr/testify/require" ) func BenchmarkReap(b *testing.B) { @@ -15,19 +15,12 @@ func BenchmarkReap(b *testing.B) { mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mp.config.Size = 100000 + mp.config.Size = 100_000_000 // so that the mempool never saturates + addTxs(b, mp, 0, 10000) - size := 10000 - for i := 0; i < size; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(tx, nil, TxInfo{}); err != nil { - b.Fatal(err) - } - } b.ResetTimer() for i := 0; i < b.N; i++ { - mp.ReapMaxBytesMaxGas(100000000, 10000000) + mp.ReapMaxBytesMaxGas(100_000_000, -1) } } @@ -37,19 +30,16 @@ func BenchmarkCheckTx(b *testing.B) { mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mp.config.Size = 1000000 - + mp.config.Size = 100_000_000 b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) + tx := kvstore.NewTxFromID(i) b.StartTimer() - if err := mp.CheckTx(tx, nil, TxInfo{}); err != nil { - b.Fatal(err) - } + err := mp.CheckTx(tx, nil, TxInfo{}) + require.NoError(b, err, i) } } @@ -59,21 +49,18 @@ func BenchmarkParallelCheckTx(b *testing.B) { mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mp.config.Size = 100000000 - + mp.config.Size = 100_000_000 var txcnt uint64 next := func() uint64 { - return atomic.AddUint64(&txcnt, 1) - 1 + return atomic.AddUint64(&txcnt, 1) } b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, next()) - if err := mp.CheckTx(tx, nil, TxInfo{}); err != nil { - b.Fatal(err) - } + tx := kvstore.NewTxFromID(int(next())) + err := mp.CheckTx(tx, nil, TxInfo{}) + require.NoError(b, err, tx) } }) } @@ -84,17 +71,79 @@ func BenchmarkCheckDuplicateTx(b *testing.B) { mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mp.config.Size = 1000000 + mp.config.Size = 2 + tx := kvstore.NewTxFromID(1) + if err := mp.CheckTx(tx, nil, TxInfo{}); err != nil { + b.Fatal(err) + } + err := mp.FlushAppConn() + require.NoError(b, err) + + b.ResetTimer() for i := 0; i < b.N; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(tx, nil, TxInfo{}); err != nil { - b.Fatal(err) - } + err := mp.CheckTx(tx, nil, TxInfo{}) + require.ErrorAs(b, err, &ErrTxInCache, "tx should be duplicate") + } +} - if err := mp.CheckTx(tx, nil, TxInfo{}); err == nil { - b.Fatal("tx should be duplicate") - } +func BenchmarkUpdate(b *testing.B) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + numTxs := 1000 + b.ResetTimer() + for i := 1; i <= b.N; i++ { + b.StopTimer() + txs := addTxs(b, mp, i*numTxs, numTxs) + require.Equal(b, len(txs), mp.Size(), len(txs)) + b.StartTimer() + + doUpdate(b, mp, int64(i), txs) + require.Zero(b, mp.Size()) + } +} + +func BenchmarkUpdateAndRecheck(b *testing.B) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + 
defer cleanup() + + numTxs := 1000 + b.ResetTimer() + for i := 1; i <= b.N; i++ { + b.StopTimer() + mp.Flush() + txs := addTxs(b, mp, i*numTxs, numTxs) + require.Equal(b, len(txs), mp.Size(), len(txs)) + b.StartTimer() + + // Update a part of txs and recheck the rest. + doUpdate(b, mp, int64(i), txs[:numTxs/2]) } + +} + +func BenchmarkUpdateRemoteClient(b *testing.B) { + mp, cleanup := newMempoolWithAsyncConnection(b) + defer cleanup() + + b.ResetTimer() + for i := 1; i <= b.N; i++ { + b.StopTimer() + tx := kvstore.NewTxFromID(i) + err := mp.CheckTx(tx, nil, TxInfo{}) + require.NoError(b, err) + err = mp.FlushAppConn() + require.NoError(b, err) + require.Equal(b, 1, mp.Size()) + b.StartTimer() + + txs := mp.ReapMaxTxs(mp.Size()) + doUpdate(b, mp, int64(i), txs) + } + } diff --git a/mempool/cache_test.go b/mempool/cache_test.go index bb659f20924..b71d9c6122e 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -3,9 +3,10 @@ package mempool import ( "crypto/rand" "crypto/sha256" - "fmt" "testing" + "fmt" + "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/proxy" diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index e0501cebb2a..455cce3ab4e 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -3,9 +3,10 @@ package mempool import ( "bytes" "context" - "errors" + "fmt" "sync" "sync/atomic" + "time" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/config" @@ -13,7 +14,6 @@ import ( "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/proxy" "github.com/cometbft/cometbft/types" ) @@ -24,12 +24,11 @@ import ( // mempool uses a concurrent list structure for storing transactions that can // be efficiently accessed by multiple concurrent readers. type CListMempool struct { - // Atomic integers - height int64 // the last block Update()'d to - txsBytes int64 // total size of mempool, in bytes + height atomic.Int64 // the last block Update()'d to + txsBytes atomic.Int64 // total size of mempool, in bytes // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool + notifiedTxsAvailable atomic.Bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty config *config.MempoolConfig @@ -43,11 +42,8 @@ type CListMempool struct { txs *clist.CList // concurrent linked-list of good txs proxyAppConn proxy.AppConnMempool - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated in - // serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here + // Keeps track of the rechecking process. + recheck *recheck // Map for quick access to txs to record sender in CheckTx. 
// txsMap: txKey -> CElement
@@ -74,17 +70,15 @@ func NewCListMempool(
 	height int64,
 	options ...CListMempoolOption,
 ) *CListMempool {
-	mp := &CListMempool{
-		config:        cfg,
-		proxyAppConn:  proxyAppConn,
-		txs:           clist.New(),
-		height:        height,
-		recheckCursor: nil,
-		recheckEnd:    nil,
-		logger:        log.NewNopLogger(),
-		metrics:       NopMetrics(),
+	mp := &CListMempool{
+		config:       cfg,
+		proxyAppConn: proxyAppConn,
+		txs:          clist.New(),
+		recheck:      newRecheck(),
+		logger:       log.NewNopLogger(),
+		metrics:      NopMetrics(),
 	}
+	mp.height.Store(height)
 
 	if cfg.CacheSize > 0 {
 		mp.cache = NewLRUTxCache(cfg.CacheSize)
@@ -101,6 +95,32 @@ func NewCListMempool(
 	return mp
 }
 
+func (mem *CListMempool) getCElement(txKey types.TxKey) (*clist.CElement, bool) {
+	if e, ok := mem.txsMap.Load(txKey); ok {
+		return e.(*clist.CElement), true
+	}
+	return nil, false
+}
+
+func (mem *CListMempool) getMemTx(txKey types.TxKey) *mempoolTx {
+	if e, ok := mem.getCElement(txKey); ok {
+		return e.Value.(*mempoolTx)
+	}
+	return nil
+}
+
+func (mem *CListMempool) removeAllTxs() {
+	for e := mem.txs.Front(); e != nil; e = e.Next() {
+		mem.txs.Remove(e)
+		e.DetachPrev()
+	}
+
+	mem.txsMap.Range(func(key, _ interface{}) bool {
+		mem.txsMap.Delete(key)
+		return true
+	})
+}
+
 // NOTE: not thread safe - should only be called once, on startup
 func (mem *CListMempool) EnableTxsAvailable() {
 	mem.txsAvailable = make(chan struct{}, 1)
@@ -132,6 +152,9 @@ func WithMetrics(metrics *Metrics) CListMempoolOption {
 
 // Safe for concurrent use by multiple goroutines.
 func (mem *CListMempool) Lock() {
+	if mem.recheck.setRecheckFull() {
+		mem.logger.Debug("the state of recheckFull has flipped")
+	}
 	mem.updateMtx.Lock()
 }
 
@@ -147,12 +170,17 @@ func (mem *CListMempool) Size() int {
 
 // Safe for concurrent use by multiple goroutines.
 func (mem *CListMempool) SizeBytes() int64 {
-	return atomic.LoadInt64(&mem.txsBytes)
+	return mem.txsBytes.Load()
 }
 
 // Lock() must be held by the caller during execution.
 func (mem *CListMempool) FlushAppConn() error {
-	return mem.proxyAppConn.Flush(context.TODO())
+	err := mem.proxyAppConn.Flush(context.TODO())
+	if err != nil {
+		return ErrFlushAppConn{Err: err}
+	}
+
+	return nil
 }
 
 // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
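A note on the hunks above: height, txsBytes, and notifiedTxsAvailable move from raw fields updated through atomic.LoadInt64/AddInt64/SwapInt64 calls to the typed atomics introduced in Go 1.19, so atomicity is enforced by the field's type rather than by caller discipline. Below is a minimal sketch of the same pattern (an editor's illustration only; the struct and its field names are invented, not the mempool's):

package main

import (
	"fmt"
	"sync/atomic"
)

// counters mirrors the pattern used by CListMempool: a typed atomic field
// cannot be read or written non-atomically by accident, unlike a bare int64.
type counters struct {
	height   atomic.Int64
	txsBytes atomic.Int64
	notified atomic.Bool
}

func main() {
	var c counters
	c.height.Store(100) // e.g. set once in the constructor
	c.txsBytes.Add(512) // e.g. grow on add, shrink on remove

	// CompareAndSwap gives "notify exactly once per height" without a lock,
	// which is the behavior notifyTxsAvailable needs.
	if c.notified.CompareAndSwap(false, true) {
		fmt.Println("txs available at height", c.height.Load())
	}
	c.notified.Store(false) // reset on Update, as the mempool does
}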
@@ -160,18 +188,10 @@ func (mem *CListMempool) Flush() { mem.updateMtx.RLock() defer mem.updateMtx.RUnlock() - _ = atomic.SwapInt64(&mem.txsBytes, 0) + mem.txsBytes.Store(0) mem.cache.Reset() - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap.Range(func(key, _ interface{}) bool { - mem.txsMap.Delete(key) - return true - }) + mem.removeAllTxs() } // TxsFront returns the first transaction in the ordered list for peer @@ -205,7 +225,6 @@ func (mem *CListMempool) CheckTx( cb func(*abci.ResponseCheckTx), txInfo TxInfo, ) error { - mem.updateMtx.RLock() // use defer to unlock mutex because application (*local client*) might panic defer mem.updateMtx.RUnlock() @@ -213,6 +232,7 @@ func (mem *CListMempool) CheckTx( txSize := len(tx) if err := mem.isFull(txSize); err != nil { + mem.metrics.RejectedTxs.Add(1) return err } @@ -225,15 +245,13 @@ func (mem *CListMempool) CheckTx( if mem.preCheck != nil { if err := mem.preCheck(tx); err != nil { - return ErrPreCheck{ - Reason: err, - } + return ErrPreCheck{Err: err} } } // NOTE: proxyAppConn may error if tx buffer is full if err := mem.proxyAppConn.Error(); err != nil { - return err + return ErrAppConnMempool{Err: err} } if !mem.cache.Push(tx) { // if the transaction already exists in the cache @@ -241,9 +259,8 @@ func (mem *CListMempool) CheckTx( // Note it's possible a tx is still in the cache but no longer in the mempool // (eg. after committing a block, txs are removed from mempool but not cache), // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - memTx.senders.LoadOrStore(txInfo.SenderID, true) + if memTx := mem.getMemTx(tx.Key()); memTx != nil { + memTx.addSender(txInfo.SenderID) // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, // but they can spam the same tx with little cost to them atm. @@ -253,9 +270,9 @@ func (mem *CListMempool) CheckTx( reqRes, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.RequestCheckTx{Tx: tx}) if err != nil { - return err + panic(fmt.Errorf("CheckTx request for tx %s failed: %w", log.NewLazySprintf("%v", tx.Hash()), err)) } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb)) + reqRes.SetCallback(mem.reqResCb(tx, txInfo, cb)) return nil } @@ -270,15 +287,34 @@ func (mem *CListMempool) CheckTx( // When rechecking, we don't need the peerID, so the recheck callback happens // here. func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { + switch r := req.Value.(type) { + case *abci.Request_CheckTx: + // Process only Recheck responses. 
+ if r.CheckTx.Type != abci.CheckTxType_Recheck { + return + } + default: + // ignore other type of requests return } - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + tx := types.Tx(req.GetCheckTx().Tx) + if mem.recheck.done() { + mem.logger.Error("rechecking has finished; discard late recheck response", + "tx", log.NewLazySprintf("%v", tx.Key())) + return + } + mem.metrics.RecheckTimes.Add(1) + mem.resCbRecheck(tx, r.CheckTx) + + // update metrics + mem.metrics.Size.Set(float64(mem.Size())) - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) + default: + // ignore other messages + } } // Request specific callback that should be set on individual reqRes objects @@ -292,20 +328,20 @@ func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { // Used in CheckTx to record PeerID who sent us the tx. func (mem *CListMempool) reqResCb( tx []byte, - peerID uint16, - peerP2PID p2p.ID, + txInfo TxInfo, externalCb func(*abci.ResponseCheckTx), ) func(res *abci.Response) { return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") + if !mem.recheck.done() { + panic(log.NewLazySprintf("rechecking has not finished; cannot check new tx %v", + types.Tx(tx).Hash())) } - mem.resCbFirstTime(tx, peerID, peerP2PID, res) + mem.resCbFirstTime(tx, txInfo, res) // update metrics mem.metrics.Size.Set(float64(mem.Size())) + mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) // passed in by the caller of CheckTx, eg. the RPC if externalCb != nil { @@ -319,39 +355,29 @@ func (mem *CListMempool) reqResCb( func (mem *CListMempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) mem.txsMap.Store(memTx.tx.Key(), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) + mem.txsBytes.Add(int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } +// RemoveTxByKey removes a transaction from the mempool by its TxKey index. // Called from: // - Update (lock held) if tx was committed // - resCbRecheck (lock not held) if tx was invalidated -func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(tx.Key()) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) -} - -// RemoveTxByKey removes a transaction from the mempool by its TxKey index. 
func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - if e, ok := mem.txsMap.Load(txKey); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement)) - return nil - } - return errors.New("found empty transaction") + if elem, ok := mem.getCElement(txKey); ok { + mem.txs.Remove(elem) + elem.DetachPrev() + mem.txsMap.Delete(txKey) + tx := elem.Value.(*mempoolTx).tx + mem.txsBytes.Add(int64(-len(tx))) + return nil } - return errors.New("transaction not found") + return ErrTxNotFound } func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() - ) - + memSize := mem.Size() + txsBytes := mem.SizeBytes() if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { return ErrMempoolIsFull{ NumTxs: memSize, @@ -361,6 +387,10 @@ func (mem *CListMempool) isFull(txSize int) error { } } + if mem.recheck.consideredFull() { + return ErrRecheckFull + } + return nil } @@ -370,8 +400,7 @@ func (mem *CListMempool) isFull(txSize int) error { // handled by the resCbRecheck callback. func (mem *CListMempool) resCbFirstTime( tx []byte, - peerID uint16, - peerP2PID p2p.ID, + txInfo TxInfo, res *abci.Response, ) { switch r := res.Value.(type) { @@ -386,22 +415,39 @@ func (mem *CListMempool) resCbFirstTime( if err := mem.isFull(len(tx)); err != nil { // remove from cache (mempool might have a space later) mem.cache.Remove(tx) - mem.logger.Error(err.Error()) + // use debug level to avoid spamming logs when traffic is high + mem.logger.Debug(err.Error()) + mem.metrics.RejectedTxs.Add(1) + return + } + + // Check transaction not already in the mempool + if e, ok := mem.txsMap.Load(types.Tx(tx).Key()); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + memTx.addSender(txInfo.SenderID) + mem.logger.Debug( + "transaction already there, not adding it again", + "tx", types.Tx(tx).Hash(), + "res", r, + "height", mem.height.Load(), + "total", mem.Size(), + ) + mem.metrics.RejectedTxs.Add(1) return } memTx := &mempoolTx{ - height: mem.height, + height: mem.height.Load(), gasWanted: r.CheckTx.GasWanted, tx: tx, } - memTx.senders.Store(peerID, true) + memTx.addSender(txInfo.SenderID) mem.addTx(memTx) mem.logger.Debug( "added good transaction", "tx", types.Tx(tx).Hash(), "res", r, - "height", memTx.height, + "height", mem.height.Load(), "total", mem.Size(), ) mem.notifyTxsAvailable() @@ -410,7 +456,7 @@ func (mem *CListMempool) resCbFirstTime( mem.logger.Debug( "rejected bad transaction", "tx", types.Tx(tx).Hash(), - "peerID", peerP2PID, + "peerID", txInfo.SenderP2PID, "res", r, "err", postCheckErr, ) @@ -431,72 +477,28 @@ func (mem *CListMempool) resCbFirstTime( // // The case where the app checks the tx for the first time is handled by the // resCbFirstTime callback. -func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, memTx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. 
- break - } - - mem.logger.Error( - "re-CheckTx transaction mismatch", - "got", types.Tx(tx), - "expected", memTx.tx, - ) - - if mem.recheckCursor == mem.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - mem.recheckCursor = nil - return - } - - mem.recheckCursor = mem.recheckCursor.Next() - memTx = mem.recheckCursor.Value.(*mempoolTx) - } +func (mem *CListMempool) resCbRecheck(tx types.Tx, res *abci.ResponseCheckTx) { + // Check whether tx is still in the list of transactions that can be rechecked. + if !mem.recheck.findNextEntryMatching(&tx) { + // Reached the end of the list and didn't find a matching tx; rechecking has finished. + return + } - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, res) + } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) - mem.removeTx(tx, mem.recheckCursor) - // We remove the invalid tx from the cache because it might be good later - if !mem.config.KeepInvalidTxsInCache { - mem.cache.Remove(tx) - } - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() + if (res.Code != abci.CodeTypeOK) || postCheckErr != nil { + // Tx became invalidated due to newly committed block. + mem.logger.Debug("tx is no longer valid", "tx", tx.Hash(), "res", res, "postCheckErr", postCheckErr) + if err := mem.RemoveTxByKey(tx.Key()); err != nil { + mem.logger.Debug("Transaction could not be removed from mempool", "err", err) } - if mem.recheckCursor == nil { - // Done! - mem.logger.Debug("done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } + if !mem.config.KeepInvalidTxsInCache { + mem.cache.Remove(tx) + mem.metrics.EvictedTxs.Add(1) } - default: - // ignore other messages } } @@ -509,9 +511,8 @@ func (mem *CListMempool) notifyTxsAvailable() { if mem.Size() == 0 { panic("notified txs available but mempool is empty!") } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { + if mem.txsAvailable != nil && mem.notifiedTxsAvailable.CompareAndSwap(false, true) { // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true select { case mem.txsAvailable <- struct{}{}: default: @@ -585,9 +586,11 @@ func (mem *CListMempool) Update( preCheck PreCheckFunc, postCheck PostCheckFunc, ) error { + mem.logger.Debug("Update", "height", height, "len(txs)", len(txs)) + // Set height - mem.height = height - mem.notifiedTxsAvailable = false + mem.height.Store(height) + mem.notifiedTxsAvailable.Store(false) if preCheck != nil { mem.preCheck = preCheck @@ -616,71 +619,183 @@ func (mem *CListMempool) Update( // 100 // https://github.com/tendermint/tendermint/issues/3322. if err := mem.RemoveTxByKey(tx.Key()); err != nil { - mem.logger.Error("Committed transaction could not be removed from mempool", "key", tx.Key(), err.Error()) + mem.logger.Debug("Committed transaction not in local mempool (not an error)", + "key", tx.Key(), + "error", err.Error()) } } - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. 
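The notifyTxsAvailable change above replaces a mutex-protected boolean with an atomic CompareAndSwap, so only one of many concurrent callers sends on the capacity-1 channel. A reduced, runnable sketch of that notify-once pattern, using stand-in names and only the standard library:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

type notifier struct {
    notified  atomic.Bool   // plays the role of notifiedTxsAvailable
    available chan struct{} // capacity 1, like txsAvailable
}

func (n *notifier) notify() {
    // Only the first caller since the last reset wins the CAS and sends.
    if n.available != nil && n.notified.CompareAndSwap(false, true) {
        select {
        case n.available <- struct{}{}:
        default: // a signal is already buffered
        }
    }
}

func main() {
    n := &notifier{available: make(chan struct{}, 1)}
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ { // many concurrent notifiers, exactly one signal
        wg.Add(1)
        go func() { defer wg.Done(); n.notify() }()
    }
    wg.Wait()
    <-n.available
    fmt.Println("one txs-available signal consumed; buffered:", len(n.available))
}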
+	// Recheck txs left in the mempool to remove them if they became invalid in the new state.
+	if mem.config.Recheck {
+		mem.recheckTxs()
+	}
+
+	// Notify if there are still txs left in the mempool.
 	if mem.Size() > 0 {
-		if mem.config.Recheck {
-			mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height)
-			mem.recheckTxs()
-			// At this point, mem.txs are being rechecked.
-			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
-			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
-		} else {
-			mem.notifyTxsAvailable()
-		}
+		mem.notifyTxsAvailable()
 	}

 	// Update metrics
 	mem.metrics.Size.Set(float64(mem.Size()))
+	mem.metrics.SizeBytes.Set(float64(mem.SizeBytes()))

 	return nil
 }

+// recheckTxs sends all transactions in the mempool to the app for re-validation. When the function
+// returns, all recheck responses from the app have been processed.
 func (mem *CListMempool) recheckTxs() {
-	if mem.Size() == 0 {
-		panic("recheckTxs is called, but the mempool is empty")
+	mem.logger.Debug("recheck txs", "height", mem.height.Load(), "num-txs", mem.Size())
+
+	if mem.Size() <= 0 {
+		return
 	}

-	mem.recheckCursor = mem.txs.Front()
-	mem.recheckEnd = mem.txs.Back()
+	mem.recheck.init(mem.txs.Front(), mem.txs.Back())

-	// Push txs to proxyAppConn
-	// NOTE: globalCb may be called concurrently.
+	// NOTE: globalCb may be called concurrently, but CheckTx cannot be executed concurrently
+	// because this function has the lock (via Update and Lock).
 	for e := mem.txs.Front(); e != nil; e = e.Next() {
-		memTx := e.Value.(*mempoolTx)
+		tx := e.Value.(*mempoolTx).tx
+		mem.recheck.numPendingTxs.Add(1)
+
+		// Send a CheckTx request to the app. If we're using a sync client, the resCbRecheck
+		// callback will be called right after receiving the response.
 		_, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.RequestCheckTx{
-			Tx:   memTx.tx,
+			Tx:   tx,
 			Type: abci.CheckTxType_Recheck,
 		})
 		if err != nil {
-			mem.logger.Error("recheckTx", err, "err")
-			return
+			panic(fmt.Errorf("(re-)CheckTx request for tx %s failed: %w", log.NewLazySprintf("%v", tx.Hash()), err))
 		}
 	}

-	// In
+	if n := mem.recheck.numPendingTxs.Load(); n > 0 {
+		mem.logger.Error("not all txs were rechecked", "not-rechecked", n)
+	}
+	mem.logger.Debug("done rechecking txs", "height", mem.height.Load(), "num-txs", mem.Size())
+}
+
+// The cursor and end pointers define a dynamic list of transactions that could be rechecked. The
+// end pointer is fixed. When a recheck response for a transaction is received, cursor will point to
+// the entry in the mempool corresponding to that transaction, thus narrowing the list. Transactions
+// corresponding to entries between the old and current positions of cursor will be ignored for
+// rechecking. This is to guarantee that recheck responses are processed in the same sequential
+// order as they appear in the mempool.
+type recheck struct {
+	cursor        *clist.CElement // next expected recheck response
+	end           *clist.CElement // last entry in the mempool to recheck
+	doneCh        chan struct{}   // to signal that rechecking has finished successfully (for async app connections)
+	numPendingTxs atomic.Int32    // number of transactions still pending to recheck
+	isRechecking  atomic.Bool     // true iff the rechecking process has begun and is not yet finished
+	recheckFull   atomic.Bool     // whether rechecking TXs cannot be completed before a new block is decided
+}
+
+func newRecheck() *recheck {
+	return &recheck{
+		doneCh: make(chan struct{}, 1),
+	}
+}
+
+func (rc *recheck) init(first, last *clist.CElement) {
+	if !rc.done() {
+		panic("Having more than one rechecking process at a time is not possible.")
+	}
+	rc.cursor = first
+	rc.end = last
+	rc.numPendingTxs.Store(0)
+	rc.isRechecking.Store(true)
 }

-//--------------------------------------------------------------------------------
+// done returns true when there is no recheck response to process.
+// Safe for concurrent use by multiple goroutines.
+func (rc *recheck) done() bool {
+	return !rc.isRechecking.Load()
+}
+
+// setDone registers that rechecking has finished.
+func (rc *recheck) setDone() {
+	rc.cursor = nil
+	rc.recheckFull.Store(false)
+	rc.isRechecking.Store(false)
+}

-// mempoolTx is a transaction that successfully ran
-type mempoolTx struct {
-	height    int64    // height that this tx had been validated in
-	gasWanted int64    // amount of gas this tx states it will require
-	tx        types.Tx //
+// setNextEntry sets cursor to the next entry in the list. If there is no next, cursor will be nil.
+func (rc *recheck) setNextEntry() {
+	rc.cursor = rc.cursor.Next()
+}
+
+// tryFinish will check if the cursor is at the end of the list and notify the channel that
+// rechecking has finished. It returns true iff it's done rechecking.
+func (rc *recheck) tryFinish() bool {
+	if rc.cursor == rc.end {
+		// Reached end of the list without finding a matching tx.
+		rc.setDone()
+	}
+	if rc.done() {
+		// Notify that recheck has finished.
+		select {
+		case rc.doneCh <- struct{}{}:
+		default:
+		}
+		return true
+	}
+	return false
+}
+
+// findNextEntryMatching searches for the next transaction matching the given transaction, which
+// corresponds to the recheck response to be processed next. Then it checks if it has reached the
+// end of the list, so it can finish rechecking.
+//
+// The goal is to guarantee that transactions are rechecked in the order in which they are in the
+// mempool. Transactions whose recheck response arrive late or don't arrive at all are skipped and
+// not rechecked.
+func (rc *recheck) findNextEntryMatching(tx *types.Tx) bool {
+	found := false
+	for ; !rc.done(); rc.setNextEntry() {
+		expectedTx := rc.cursor.Value.(*mempoolTx).tx
+		if bytes.Equal(*tx, expectedTx) {
+			// Found an entry in the list of txs to recheck that matches tx.
+			found = true
+			rc.numPendingTxs.Add(-1)
+			break
+		}
+	}
+
+	if !rc.tryFinish() {
+		// Not finished yet; set the cursor for processing the next recheck response.
+		rc.setNextEntry()
+	}
+	return found
+}
+
+// doneRechecking returns the channel used to signal that rechecking has finished.
+func (rc *recheck) doneRechecking() <-chan struct{} {
+	return rc.doneCh
+}

-	// ids of peers who've sent us this tx (as a map for quick lookups).
-	// senders: PeerID -> bool
-	senders sync.Map
+// setRecheckFull sets recheckFull to true if rechecking is still in progress. It returns true iff
+// the value of recheckFull has changed.
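To make the cursor/end narration above concrete, here is a small simulation of findNextEntryMatching over a plain slice instead of a CList. Responses arrive only for tx1 and tx3, so tx2 is skipped and never rechecked, which is exactly the ordering guarantee the comment describes. Simplified stand-ins, not the mempool's code:

package main

import "fmt"

func main() {
    txs := []string{"tx1", "tx2", "tx3", "tx4"}
    cursor, end := 0, len(txs)-1
    done := false

    // process plays the role of findNextEntryMatching for one recheck response.
    process := func(tx string) {
        for !done {
            if txs[cursor] == tx {
                fmt.Println("recheck", tx) // response matches the expected entry
                break
            }
            // Entries between the old and new cursor positions are skipped.
            fmt.Println("skip", txs[cursor], "(its response did not arrive in order)")
            if cursor == end {
                done = true // reached the fixed end pointer without a match
                return
            }
            cursor++
        }
        if cursor == end {
            done = true // like tryFinish: cursor hit end, rechecking is over
        } else {
            cursor++ // expect the next entry's response
        }
    }

    process("tx1") // in order: rechecked
    process("tx3") // tx2's response never arrived, so tx2 is skipped
    fmt.Println("done:", done, "- tx4's response is still pending")
}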
+func (rc *recheck) setRecheckFull() bool { + rechecking := !rc.done() + recheckFull := rc.recheckFull.Swap(rechecking) + return rechecking != recheckFull } -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) +// consideredFull returns true iff the mempool should be considered as full while rechecking is in +// progress. +func (rc *recheck) consideredFull() bool { + return rc.recheckFull.Load() } diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 061975ecf91..8d676789f3c 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -3,9 +3,11 @@ package mempool import ( "context" "encoding/binary" + "errors" "fmt" mrand "math/rand" "os" + "sync" "testing" "time" @@ -33,16 +35,17 @@ import ( // test. type cleanupFunc func() -func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) { +func newMempoolWithAppMock(client abciclient.Client) (*CListMempool, cleanupFunc, error) { conf := test.ResetTestRoot("mempool_test") - mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client) + mp, cu := newMempoolWithAppAndConfigMock(conf, client) return mp, cu, nil } -func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator, +func newMempoolWithAppAndConfigMock( cfg *config.Config, - client abciclient.Client) (*CListMempool, cleanupFunc) { + client abciclient.Client, +) (*CListMempool, cleanupFunc) { appConnMem := client appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() @@ -95,13 +98,10 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mp Mempool, count int, peerID uint16) types.Txs { - txs := make(types.Txs, count) +func callCheckTx(t *testing.T, mp Mempool, txs types.Txs, peerID uint16) { txInfo := TxInfo{SenderID: peerID} - for i := 0; i < count; i++ { - txBytes := kvstore.NewRandomTx(20) - txs[i] = txBytes - if err := mp.CheckTx(txBytes, nil, txInfo); err != nil { + for i, tx := range txs { + if err := mp.CheckTx(tx, nil, txInfo); err != nil { // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs // returned. @@ -111,6 +111,36 @@ func checkTxs(t *testing.T, mp Mempool, count int, peerID uint16) types.Txs { t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) } } +} + +// Generate a list of random transactions. +func NewRandomTxs(numTxs int, txLen int) types.Txs { + txs := make(types.Txs, numTxs) + for i := 0; i < numTxs; i++ { + txBytes := kvstore.NewRandomTx(txLen) + txs[i] = txBytes + } + return txs +} + +// Generate a list of random transactions of a given size and call CheckTx on +// each of them. 
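setRecheckFull and consideredFull above implement a small admission gate: if a new block arrives while rechecking is still running, the mempool reports itself as full until rechecking ends. A hedged sketch of that interplay with stand-in fields; the calling pattern from Lock and isFull is assumed here for illustration:

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var errRecheckFull = errors.New("mempool is still rechecking, considered full")

type gate struct {
    isRechecking atomic.Bool // stand-in for recheck.isRechecking
    recheckFull  atomic.Bool // stand-in for recheck.recheckFull
}

// signalUpdate mirrors setRecheckFull: latch "full" iff rechecking is still in
// progress, and report whether the flag changed.
func (g *gate) signalUpdate() bool {
    rechecking := g.isRechecking.Load()
    return g.recheckFull.Swap(rechecking) != rechecking
}

// admit mirrors the new check added to isFull.
func (g *gate) admit() error {
    if g.recheckFull.Load() {
        return errRecheckFull
    }
    return nil
}

func main() {
    g := &gate{}
    g.isRechecking.Store(true)    // a recheck round is running
    fmt.Println(g.signalUpdate()) // true: a new block arrived, mempool now "full"
    fmt.Println(g.admit())        // rejected while rechecking lags behind consensus

    g.isRechecking.Store(false) // rechecking finished; setDone also clears recheckFull
    g.recheckFull.Store(false)
    fmt.Println(g.admit()) // <nil>: admitting txs again
}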
+func addRandomTxs(t *testing.T, mp Mempool, count int, peerID uint16) []types.Tx { + t.Helper() + txs := NewRandomTxs(count, 20) + callCheckTx(t, mp, txs, peerID) + return txs +} + +func addTxs(tb testing.TB, mp Mempool, first, num int) []types.Tx { + tb.Helper() + txs := make([]types.Tx, 0, num) + for i := first; i < num; i++ { + tx := kvstore.NewTxFromID(i) + err := mp.CheckTx(tx, nil, TxInfo{}) + require.NoError(tb, err) + txs = append(txs, tx) + } return txs } @@ -121,7 +151,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { defer cleanup() // Ensure gas calculation behaves as expected - checkTxs(t, mp, 1, UnknownPeerID) + addRandomTxs(t, mp, 1, UnknownPeerID) tx0 := mp.TxsFront().Value.(*mempoolTx) require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") // ensure each tx is 20 bytes long @@ -153,7 +183,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 20000, 30, 20}, } for tcIndex, tt := range tests { - checkTxs(t, mp, tt.numTxsToCreate, UnknownPeerID) + addRandomTxs(t, mp, tt.numTxsToCreate, UnknownPeerID) got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", len(got), tt.expectedNumTxs, tcIndex) @@ -194,7 +224,7 @@ func TestMempoolFilters(t *testing.T) { for tcIndex, tt := range tests { err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) require.NoError(t, err) - checkTxs(t, mp, tt.numTxsToCreate, UnknownPeerID) + addRandomTxs(t, mp, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mp.Flush() } @@ -249,19 +279,16 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { mockClient.On("Error").Return(nil).Times(4) mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) + mockClient.On("Flush", mock.Anything).Return(nil) - app := kvstore.NewInMemoryApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup, err := newMempoolWithAppMock(cc, mockClient) + mp, cleanup, err := newMempoolWithAppMock(mockClient) require.NoError(t, err) defer cleanup() // Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them. txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}} for _, tx := range txs { - reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(&abci.RequestCheckTx{Tx: tx})) - reqRes.Response = abci.ToResponseCheckTx(&abci.ResponseCheckTx{Code: abci.CodeTypeOK}) - + reqRes := newReqRes(tx, abci.CodeTypeOK, abci.CheckTxType_New) mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil) err := mp.CheckTx(tx, nil, TxInfo{}) require.NoError(t, err) @@ -269,6 +296,8 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { // ensure that the callback that the mempool sets on the ReqRes is run. reqRes.InvokeCallback() } + require.Len(t, txs, mp.Size()) + require.True(t, mp.recheck.done()) // Calling update to remove the first transaction from the mempool. // This call also triggers the mempool to recheck its remaining transactions. 
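As a usage note on the helpers above, a hypothetical test in this package might combine them as follows. It assumes the package's existing newMempoolWithApp fixture and is illustrative only, not part of the diff:

func TestHelpersSketch(t *testing.T) {
    app := kvstore.NewInMemoryApplication()
    mp, cleanup := newMempoolWithApp(proxy.NewLocalClientCreator(app))
    defer cleanup()

    // Deterministic txs (IDs 0..4): contents can be asserted against reaped txs.
    det := addTxs(t, mp, 0, 5)
    require.Len(t, det, 5)

    // Random 20-byte txs: only counts are meaningful.
    addRandomTxs(t, mp, 10, UnknownPeerID)
    require.Equal(t, 15, mp.Size())
}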
@@ -357,7 +386,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch of txs, it should only fire once - txs := checkTxs(t, mp, 100, UnknownPeerID) + txs := addRandomTxs(t, mp, 100, UnknownPeerID) ensureFire(t, mp.TxsAvailable(), timeoutMS) ensureNoFire(t, mp.TxsAvailable(), timeoutMS) @@ -372,7 +401,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mp, 50, UnknownPeerID) + moreTxs := addRandomTxs(t, mp, 50, UnknownPeerID) ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // now call update with all the txs. it should not fire as there are no txs left @@ -383,7 +412,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch more txs, it should only fire once - checkTxs(t, mp, 100, UnknownPeerID) + addRandomTxs(t, mp, 100, UnknownPeerID) ensureFire(t, mp.TxsAvailable(), timeoutMS) ensureNoFire(t, mp.TxsAvailable(), timeoutMS) } @@ -631,7 +660,42 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 20, mp.SizeBytes()) assert.NoError(t, mp.RemoveTxByKey(types.Tx(tx1).Key())) assert.EqualValues(t, 10, mp.SizeBytes()) +} + +func TestMempoolNoCacheOverflow(t *testing.T) { + mp, cleanup := newMempoolWithAsyncConnection(t) + defer cleanup() + // add tx0 + tx0 := kvstore.NewTxFromID(0) + err := mp.CheckTx(tx0, nil, TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // saturate the cache to remove tx0 + for i := 1; i <= mp.config.CacheSize; i++ { + err = mp.CheckTx(kvstore.NewTxFromID(i), nil, TxInfo{}) + require.NoError(t, err) + } + err = mp.FlushAppConn() + require.NoError(t, err) + assert.False(t, mp.cache.Has(kvstore.NewTxFromID(0))) + + // add again tx0 + err = mp.CheckTx(tx0, nil, TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // tx0 should appear only once in mp.txs + found := 0 + for e := mp.txs.Front(); e != nil; e = e.Next() { + if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) { + found++ + } + } + assert.True(t, found == 1) } // This will non-deterministically catch some concurrency failures like @@ -639,18 +703,7 @@ func TestMempoolTxsBytes(t *testing.T) { // TODO: all of the tests should probably also run using the remote proxy app // since otherwise we're not actually testing the concurrency of the mempool here! 
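TestMempoolNoCacheOverflow above repeats a CheckTx-then-FlushAppConn pattern because, with a socket (async) ABCI client, CheckTx only queues a request; the mempool state is settled only after the connection is flushed. A tiny hypothetical helper makes that intent explicit; a sketch only, not part of the suite:

func addAndFlush(t *testing.T, mp *CListMempool, tx types.Tx) {
    t.Helper()
    require.NoError(t, mp.CheckTx(tx, nil, TxInfo{})) // only queues the request
    require.NoError(t, mp.FlushAppConn())             // waits for the CheckTx response
}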
func TestMempoolRemoteAppConcurrency(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) - app := kvstore.NewInMemoryApplication() - _, server := newRemoteApp(t, sockPath, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - - cfg := test.ResetTestRoot("mempool_test") - - mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) + mp, cleanup := newMempoolWithAsyncConnection(t) defer cleanup() // generate small number of txs @@ -662,9 +715,9 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { } // simulate a group of peers sending them over and over - N := cfg.Mempool.Size + n := mp.config.Size maxPeers := 5 - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { peerID := mrand.Intn(maxPeers) txNum := mrand.Intn(nTxs) tx := txs[txNum] @@ -676,21 +729,304 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { require.NoError(t, mp.FlushAppConn()) } -// caller must close server -func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) { - clientCreator, err := abciclient.NewClient(addr, "socket", true) +func TestMempoolConcurrentUpdateAndReceiveCheckTxResponse(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + for h := 1; h <= 100; h++ { + // Two concurrent threads for each height. One updates the mempool with one valid tx, + // writing the pool's height; the other, receives a CheckTx response, reading the height. + var wg sync.WaitGroup + wg.Add(2) + + go func(h int) { + defer wg.Done() + + doUpdate(t, mp, int64(h), []types.Tx{tx}) + require.Equal(t, int64(h), mp.height.Load(), "height mismatch") + }(h) + + go func(h int) { + defer wg.Done() + + tx := kvstore.NewTxFromID(h) + mp.resCbFirstTime(tx, TxInfo{}, abci.ToResponseCheckTx(&abci.ResponseCheckTx{Code: abci.CodeTypeOK})) + require.Equal(t, h, mp.Size(), "pool size mismatch") + }(h) + + wg.Wait() + } +} + +func TestMempoolNotifyTxsAvailable(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + mp.EnableTxsAvailable() + assert.NotNil(t, mp.txsAvailable) + require.False(t, mp.notifiedTxsAvailable.Load()) + + // Adding a new valid tx to the pool will notify a tx is available + tx := kvstore.NewTxFromID(1) + mp.resCbFirstTime(tx, TxInfo{}, abci.ToResponseCheckTx(&abci.ResponseCheckTx{Code: abci.CodeTypeOK})) + require.Equal(t, 1, mp.Size(), "pool size mismatch") + require.True(t, mp.notifiedTxsAvailable.Load()) + require.Len(t, mp.TxsAvailable(), 1) + <-mp.TxsAvailable() + + // Receiving CheckTx response for a tx already in the pool should not notify of available txs + mp.resCbFirstTime(tx, TxInfo{}, abci.ToResponseCheckTx(&abci.ResponseCheckTx{Code: abci.CodeTypeOK})) + require.Equal(t, 1, mp.Size()) + require.True(t, mp.notifiedTxsAvailable.Load()) + require.Empty(t, mp.TxsAvailable()) + + // Updating the pool will remove the tx and set the variable to false + err := mp.Update(1, []types.Tx{tx}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + require.Zero(t, mp.Size()) + require.False(t, mp.notifiedTxsAvailable.Load()) +} + +// Test that adding a transaction panics when the CheckTx request fails. 
+func TestMempoolSyncCheckTxReturnError(t *testing.T) {
+	mockClient := new(abciclimocks.Client)
+	mockClient.On("Start").Return(nil)
+	mockClient.On("SetLogger", mock.Anything)
+	mockClient.On("SetResponseCallback", mock.Anything)
+
+	mp, cleanup, err := newMempoolWithAppMock(mockClient)
+	require.NoError(t, err)
+	defer cleanup()
+
+	// The app will return an error on a CheckTx request.
+	tx := []byte{0x01}
+	mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(nil, errors.New("")).Once()
+
+	// Adding the transaction should panic when the call to the app returns an error.
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("CheckTx did not panic")
+		}
+	}()
+	err = mp.CheckTx(tx, nil, TxInfo{})
+	require.NoError(t, err)
+}
+
+// Test that rechecking panics when a CheckTx request fails, when using a sync ABCI client.
+func TestMempoolSyncRecheckTxReturnError(t *testing.T) {
+	mockClient := new(abciclimocks.Client)
+	mockClient.On("Start").Return(nil)
+	mockClient.On("SetLogger", mock.Anything)
+	mockClient.On("SetResponseCallback", mock.Anything)
+	mockClient.On("Error").Return(nil)
+
+	mp, cleanup, err := newMempoolWithAppMock(mockClient)
+	require.NoError(t, err)
+	defer cleanup()
+
+	// First we add two transactions to the mempool.
+	txs := []types.Tx{[]byte{0x01}, []byte{0x02}}
+	for _, tx := range txs {
+		reqRes := newReqRes(tx, abci.CodeTypeOK, abci.CheckTxType_New)
+		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once()
+		err := mp.CheckTx(tx, nil, TxInfo{})
+		require.NoError(t, err)
+
+		// ensure that the callback that the mempool sets on the ReqRes is run.
+		reqRes.InvokeCallback()
+	}
+	require.Len(t, txs, mp.Size())
+
+	// The first tx is valid when rechecking and the client will call the callback right after the
+	// response from the app and before returning.
+	reqRes0 := newReqRes(txs[0], abci.CodeTypeOK, abci.CheckTxType_Recheck)
+	mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes0, nil).Once()
+
+	// On the second CheckTx request, the app returns an error.
+	mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(nil, errors.New("")).Once()
+
+	// Rechecking should panic when the call to the app returns an error.
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("recheckTxs did not panic")
+		}
+	}()
+	mp.recheckTxs()
+}
+
+// Test that rechecking finishes correctly when a CheckTx response never arrives, when using an
+// async ABCI client.
+func TestMempoolAsyncRecheckTxReturnError(t *testing.T) {
+	var callback abciclient.Callback
+	mockClient := new(abciclimocks.Client)
+	mockClient.On("Start").Return(nil)
+	mockClient.On("SetLogger", mock.Anything)
+	mockClient.On("Error").Return(nil).Times(4)
+	mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))
+
+	mp, cleanup, err := newMempoolWithAppMock(mockClient)
+	require.NoError(t, err)
+	defer cleanup()
+
+	// Add 4 txs to the mempool.
+	txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
+	for _, tx := range txs {
+		reqRes := newReqRes(tx, abci.CodeTypeOK, abci.CheckTxType_New)
+		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once()
+		err := mp.CheckTx(tx, nil, TxInfo{})
+		require.NoError(t, err)
+
+		// ensure that the callback that the mempool sets on the ReqRes is run.
+		reqRes.InvokeCallback()
+	}
+
+	// The 4 txs are added to the mempool.
+	require.Len(t, txs, mp.Size())
+
+	// Check that recheck has not started.
+	require.True(t, mp.recheck.done())
+	require.Nil(t, mp.recheck.cursor)
+	require.Nil(t, mp.recheck.end)
+	require.False(t, mp.recheck.isRechecking.Load())
+	mockClient.AssertExpectations(t)
+
+	// One call to CheckTxAsync per tx, for rechecking.
+	mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(nil, nil).Times(4)
+
+	// On the async client, the callbacks are executed when flushing the connection. The app replies
+	// to the request for the first tx (valid) and for the third tx (invalid), so the callback is
+	// invoked twice. The app does not reply to the requests for the second and fourth txs, so the
+	// callback is not invoked on these two cases.
+	mockClient.On("Flush", mock.Anything).Run(func(_ mock.Arguments) {
+		// First tx is valid.
+		reqRes1 := newReqRes(txs[0], abci.CodeTypeOK, abci.CheckTxType_Recheck)
+		callback(reqRes1.Request, reqRes1.Response)
+		// Third tx is invalid.
+		reqRes2 := newReqRes(txs[2], 1, abci.CheckTxType_Recheck)
+		callback(reqRes2.Request, reqRes2.Response)
+	}).Return(nil)
+
+	// mp.recheck.done() should be true only before and after calling recheckTxs.
+	mp.recheckTxs()
+	require.True(t, mp.recheck.done())
+	require.False(t, mp.recheck.isRechecking.Load())
+	require.Nil(t, mp.recheck.cursor)
+	require.NotNil(t, mp.recheck.end)
+	require.Equal(t, mp.recheck.end, mp.txs.Back())
+	require.Equal(t, len(txs)-1, mp.Size()) // one invalid tx was removed
+	require.Equal(t, int32(2), mp.recheck.numPendingTxs.Load())
+
+	mockClient.AssertExpectations(t)
+}
+
+// This test used to cause a data race when rechecking (see https://github.com/cometbft/cometbft/issues/1827).
+func TestMempoolRecheckRace(t *testing.T) {
+	mp, cleanup := newMempoolWithAsyncConnection(t)
+	defer cleanup()
+
+	// Add a bunch of transactions to the mempool.
+	var err error
+	txs := newUniqueTxs(10)
+	for _, tx := range txs {
+		err = mp.CheckTx(tx, nil, TxInfo{})
+		require.NoError(t, err)
+	}
+
+	// Update one transaction to force rechecking the rest.
+	doUpdate(t, mp, 1, txs[:1])
+
+	// Recheck has finished.
+	require.True(t, mp.recheck.done())
+	require.Nil(t, mp.recheck.cursor)
+
+	// Add again the same transaction that was updated. Recheck has finished so adding this tx
+	// should not result in a data race on the variable recheck.cursor.
+	err = mp.CheckTx(txs[:1][0], nil, TxInfo{})
+	require.Equal(t, err, ErrTxInCache)
+	require.Zero(t, mp.recheck.numPendingTxs.Load())
+}
+
+// Test adding transactions while a concurrent routine reaps txs and updates the mempool, simulating
+// the consensus module, when using an async ABCI client.
+func TestMempoolConcurrentCheckTxAndUpdate(t *testing.T) {
+	mp, cleanup := newMempoolWithAsyncConnection(t)
+	defer cleanup()
+
+	maxHeight := 100
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	// A process that continuously reaps and updates the mempool, simulating creation and committing
+	// of blocks by the consensus module.
+	go func() {
+		defer wg.Done()
+
+		time.Sleep(50 * time.Millisecond) // wait a bit to have some txs in the mempool before starting to update
+		for h := 1; h <= maxHeight; h++ {
+			if mp.Size() == 0 {
+				break
+			}
+			txs := mp.ReapMaxBytesMaxGas(100, -1)
+			doUpdate(t, mp, int64(h), txs)
+		}
+	}()
+
+	// Concurrently, add transactions (one per height).
+ for h := 1; h <= maxHeight; h++ { + err := mp.CheckTx(kvstore.NewTxFromID(h), nil, TxInfo{}) + require.NoError(t, err) + } + + wg.Wait() + + // All added transactions should have been removed from the mempool. + require.Zero(t, mp.Size()) +} + +func newMempoolWithAsyncConnection(tb testing.TB) (*CListMempool, cleanupFunc) { + tb.Helper() + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) + app := kvstore.NewInMemoryApplication() + _, server := newRemoteApp(tb, sockPath, app) + tb.Cleanup(func() { + if err := server.Stop(); err != nil { + tb.Error(err) + } + }) + cfg := test.ResetTestRoot("mempool_test") + return newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) +} + +// caller must close server. +func newRemoteApp(tb testing.TB, addr string, app abci.Application) (abciclient.Client, service.Service) { + tb.Helper() + clientCreator, err := abciclient.NewClient(addr, "socket", true) + require.NoError(tb, err) // Start server server := abciserver.NewSocketServer(addr, app) server.SetLogger(log.TestingLogger().With("module", "abci-server")) if err := server.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) + tb.Fatalf("Error starting socket server: %v", err.Error()) } return clientCreator, server } +func newReqRes(tx types.Tx, code uint32, requestType abci.CheckTxType) *abciclient.ReqRes { //nolint: unparam + reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(&abci.RequestCheckTx{Tx: tx, Type: requestType})) + reqRes.Response = abci.ToResponseCheckTx(&abci.ResponseCheckTx{Code: code}) + return reqRes +} + func abciResponses(n int, code uint32) []*abci.ExecTxResult { responses := make([]*abci.ExecTxResult, 0, n) for i := 0; i < n; i++ { @@ -698,3 +1034,13 @@ func abciResponses(n int, code uint32) []*abci.ExecTxResult { } return responses } + +func doUpdate(tb testing.TB, mp Mempool, height int64, txs []types.Tx) { + tb.Helper() + mp.Lock() + err := mp.FlushAppConn() + require.NoError(tb, err) + err = mp.Update(height, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil) + require.NoError(tb, err) + mp.Unlock() +} diff --git a/mempool/errors.go b/mempool/errors.go new file mode 100644 index 00000000000..effe08cff9c --- /dev/null +++ b/mempool/errors.go @@ -0,0 +1,89 @@ +package mempool + +import ( + "errors" + "fmt" +) + +// ErrTxNotFound is returned to the client if tx is not found in mempool +var ErrTxNotFound = errors.New("transaction not found in mempool") + +// ErrTxInCache is returned to the client if we saw tx earlier +var ErrTxInCache = errors.New("tx already exists in cache") + +// ErrRecheckFull is returned when checking if the mempool is full and +// rechecking is still in progress after a new block was committed. +var ErrRecheckFull = errors.New("mempool is still rechecking after a new committed block, so it is considered as full") + +// ErrTxTooLarge defines an error when a transaction is too big to be sent in a +// message to other peers. +type ErrTxTooLarge struct { + Max int + Actual int +} + +func (e ErrTxTooLarge) Error() string { + return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) +} + +// ErrMempoolIsFull defines an error where CometBFT and the application cannot +// handle that much load. 
+type ErrMempoolIsFull struct { + NumTxs int + MaxTxs int + TxsBytes int64 + MaxTxsBytes int64 + RecheckFull bool +} + +func (e ErrMempoolIsFull) Error() string { + return fmt.Sprintf( + "mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)", + e.NumTxs, + e.MaxTxs, + e.TxsBytes, + e.MaxTxsBytes, + ) +} + +// ErrPreCheck defines an error where a transaction fails a pre-check. +type ErrPreCheck struct { + Err error +} + +func (e ErrPreCheck) Error() string { + return fmt.Sprintf("tx pre check: %v", e.Err) +} + +func (e ErrPreCheck) Unwrap() error { + return e.Err +} + +// IsPreCheckError returns true if err is due to pre check failure. +func IsPreCheckError(err error) bool { + return errors.As(err, &ErrPreCheck{}) +} + +type ErrAppConnMempool struct { + Err error +} + +func (e ErrAppConnMempool) Error() string { + return fmt.Sprintf("appConn mempool: %v", e.Err) +} + +func (e ErrAppConnMempool) Unwrap() error { + return e.Err +} + +type ErrFlushAppConn struct { + Err error +} + +func (e ErrFlushAppConn) Error() string { + return fmt.Sprintf("flush appConn mempool: %v", e.Err) +} + +func (e ErrFlushAppConn) Unwrap() error { + return e.Err +} diff --git a/mempool/mempool.go b/mempool/mempool.go index 812fee2d7cd..8ab37162641 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -2,7 +2,6 @@ package mempool import ( "crypto/sha256" - "errors" "fmt" "math" @@ -53,6 +52,8 @@ type Mempool interface { // Lock locks the mempool. The consensus must be able to hold lock to safely // update. + // Before acquiring the lock, it signals the mempool that a new update is coming. + // If the mempool is still rechecking at this point, it should be considered full. Lock() // Unlock unlocks the mempool. @@ -144,52 +145,5 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { } } -// ErrTxInCache is returned to the client if we saw tx earlier -var ErrTxInCache = errors.New("tx already exists in cache") - // TxKey is the fixed length array key used as an index. type TxKey [sha256.Size]byte - -// ErrTxTooLarge defines an error when a transaction is too big to be sent in a -// message to other peers. -type ErrTxTooLarge struct { - Max int - Actual int -} - -func (e ErrTxTooLarge) Error() string { - return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) -} - -// ErrMempoolIsFull defines an error where CometBFT and the application cannot -// handle that much load. -type ErrMempoolIsFull struct { - NumTxs int - MaxTxs int - TxsBytes int64 - MaxTxsBytes int64 -} - -func (e ErrMempoolIsFull) Error() string { - return fmt.Sprintf( - "mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)", - e.NumTxs, - e.MaxTxs, - e.TxsBytes, - e.MaxTxsBytes, - ) -} - -// ErrPreCheck defines an error where a transaction fails a pre-check. -type ErrPreCheck struct { - Reason error -} - -func (e ErrPreCheck) Error() string { - return e.Reason.Error() -} - -// IsPreCheckError returns true if err is due to pre check failure. 
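With the error types now collected in errors.go, callers can classify CheckTx failures with the standard errors package, as the reactor's Receive does later in this diff. An illustrative classifier, assuming the mempool package context; a sketch, not code from the diff:

func classifyCheckTxErr(err error) string {
    switch {
    case err == nil:
        return "accepted"
    case errors.Is(err, ErrTxInCache):
        return "duplicate"
    case errors.Is(err, ErrRecheckFull):
        return "busy rechecking"
    case errors.As(err, &ErrMempoolIsFull{}):
        return "full"
    case IsPreCheckError(err):
        return "failed pre-check"
    default:
        return "rejected"
    }
}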
-func IsPreCheckError(err error) bool { - return errors.As(err, &ErrPreCheck{}) -} diff --git a/mempool/mempoolTx.go b/mempool/mempoolTx.go new file mode 100644 index 00000000000..5c160ce2f7f --- /dev/null +++ b/mempool/mempoolTx.go @@ -0,0 +1,34 @@ +package mempool + +import ( + "sync" + "sync/atomic" + + "github.com/cometbft/cometbft/types" +) + +// mempoolTx is an entry in the mempool +type mempoolTx struct { + height int64 // height that this tx had been validated in + gasWanted int64 // amount of gas this tx states it will require + tx types.Tx // validated by the application + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> bool + senders sync.Map +} + +// Height returns the height for this transaction +func (memTx *mempoolTx) Height() int64 { + return atomic.LoadInt64(&memTx.height) +} + +func (memTx *mempoolTx) isSender(peerID uint16) bool { + _, ok := memTx.senders.Load(peerID) + return ok +} + +func (memTx *mempoolTx) addSender(senderID uint16) bool { + _, added := memTx.senders.LoadOrStore(senderID, true) + return added +} diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index 100c5e71cb6..3d202e320ad 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -20,6 +20,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "size", Help: "Number of uncommitted transactions in the mempool.", }, labels).With(labelsAndValues...), + SizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "size_bytes", + Help: "Total size of the mempool in bytes.", + }, labels).With(labelsAndValues...), TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -32,19 +38,19 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "failed_txs", - Help: "Number of failed transactions.", + Help: "FailedTxs defines the number of failed transactions. These are transactions that failed to make it into the mempool because they were deemed invalid. metrics:Number of failed transactions.", }, labels).With(labelsAndValues...), RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "rejected_txs", - Help: "Number of rejected transactions.", + Help: "RejectedTxs defines the number of rejected transactions. These are transactions that failed to make it into the mempool due to resource limits, e.g. mempool is full. metrics:Number of rejected transactions.", }, labels).With(labelsAndValues...), EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "evicted_txs", - Help: "Number of evicted transactions.", + Help: "EvictedTxs defines the number of evicted transactions. These are valid transactions that passed CheckTx and make it into the mempool but later became invalid. 
metrics:Number of evicted transactions.", }, labels).With(labelsAndValues...), RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, @@ -52,16 +58,24 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "recheck_times", Help: "Number of times transactions are rechecked in the mempool.", }, labels).With(labelsAndValues...), + ActiveOutboundConnections: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "active_outbound_connections", + Help: "Number of connections being actively used for gossiping transactions (experimental feature).", + }, labels).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Size: discard.NewGauge(), - TxSizeBytes: discard.NewHistogram(), - FailedTxs: discard.NewCounter(), - RejectedTxs: discard.NewCounter(), - EvictedTxs: discard.NewCounter(), - RecheckTimes: discard.NewCounter(), + Size: discard.NewGauge(), + SizeBytes: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), + ActiveOutboundConnections: discard.NewGauge(), } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 85ca8c0cfbd..6a24a5b48a7 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -18,26 +18,34 @@ type Metrics struct { // Number of uncommitted transactions in the mempool. Size metrics.Gauge + // Total size of the mempool in bytes. + SizeBytes metrics.Gauge + // Histogram of transaction sizes in bytes. TxSizeBytes metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"1,3,7"` - // Number of failed transactions. + // FailedTxs defines the number of failed transactions. These are + // transactions that failed to make it into the mempool because they were + // deemed invalid. + // metrics:Number of failed transactions. FailedTxs metrics.Counter // RejectedTxs defines the number of rejected transactions. These are - // transactions that passed CheckTx but failed to make it into the mempool - // due to resource limits, e.g. mempool is full and no lower priority - // transactions exist in the mempool. - //metrics:Number of rejected transactions. + // transactions that failed to make it into the mempool due to resource + // limits, e.g. mempool is full. + // metrics:Number of rejected transactions. RejectedTxs metrics.Counter // EvictedTxs defines the number of evicted transactions. These are valid - // transactions that passed CheckTx and existed in the mempool but were later - // evicted to make room for higher priority valid transactions that passed - // CheckTx. - //metrics:Number of evicted transactions. + // transactions that passed CheckTx and make it into the mempool but later + // became invalid. + // metrics:Number of evicted transactions. EvictedTxs metrics.Counter // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter + + // Number of connections being actively used for gossiping transactions + // (experimental feature). 
+ ActiveOutboundConnections metrics.Gauge } diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index 7573c58e978..173ac9ec086 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -20,6 +20,10 @@ type Mempool struct { func (_m *Mempool) CheckTx(tx types.Tx, callback func(*abcitypes.ResponseCheckTx), txInfo mempool.TxInfo) error { ret := _m.Called(tx, callback, txInfo) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 error if rf, ok := ret.Get(0).(func(types.Tx, func(*abcitypes.ResponseCheckTx), mempool.TxInfo) error); ok { r0 = rf(tx, callback, txInfo) @@ -44,6 +48,10 @@ func (_m *Mempool) Flush() { func (_m *Mempool) FlushAppConn() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FlushAppConn") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -63,6 +71,10 @@ func (_m *Mempool) Lock() { func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { ret := _m.Called(maxBytes, maxGas) + if len(ret) == 0 { + panic("no return value specified for ReapMaxBytesMaxGas") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { r0 = rf(maxBytes, maxGas) @@ -79,6 +91,10 @@ func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { func (_m *Mempool) ReapMaxTxs(max int) types.Txs { ret := _m.Called(max) + if len(ret) == 0 { + panic("no return value specified for ReapMaxTxs") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int) types.Txs); ok { r0 = rf(max) @@ -95,6 +111,10 @@ func (_m *Mempool) ReapMaxTxs(max int) types.Txs { func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { ret := _m.Called(txKey) + if len(ret) == 0 { + panic("no return value specified for RemoveTxByKey") + } + var r0 error if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { r0 = rf(txKey) @@ -109,6 +129,10 @@ func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { func (_m *Mempool) Size() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -123,6 +147,10 @@ func (_m *Mempool) Size() int { func (_m *Mempool) SizeBytes() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SizeBytes") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -137,6 +165,10 @@ func (_m *Mempool) SizeBytes() int64 { func (_m *Mempool) TxsAvailable() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxsAvailable") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -158,6 +190,10 @@ func (_m *Mempool) Unlock() { func (_m *Mempool) Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { ret := _m.Called(blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error if rf, ok := ret.Get(0).(func(int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { r0 = rf(blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) @@ -168,13 +204,12 @@ func (_m *Mempool) Update(blockHeight int64, blockTxs types.Txs, deliverTxRespon return r0 } -type mockConstructorTestingTNewMempool interface { +// NewMempool creates a new instance of Mempool. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMempool(t interface { mock.TestingT Cleanup(func()) -} - -// NewMempool creates a new instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMempool(t mockConstructorTestingTNewMempool) *Mempool { +}) *Mempool { mock := &Mempool{} mock.Mock.Test(t) diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go new file mode 100644 index 00000000000..6bfff3b04d4 --- /dev/null +++ b/mempool/nop_mempool.go @@ -0,0 +1,107 @@ +package mempool + +import ( + "errors" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/types" +) + +// NopMempool is a mempool that does nothing. +// +// The ABCI app is responsible for storing, disseminating, and proposing transactions. +// See [ADR-111](../docs/architecture/adr-111-nop-mempool.md). +type NopMempool struct{} + +// errNotAllowed indicates that the operation is not allowed with `nop` mempool. +var errNotAllowed = errors.New("not allowed with `nop` mempool") + +var _ Mempool = &NopMempool{} + +// CheckTx always returns an error. +func (*NopMempool) CheckTx(types.Tx, func(*abci.ResponseCheckTx), TxInfo) error { + return errNotAllowed +} + +// RemoveTxByKey always returns an error. +func (*NopMempool) RemoveTxByKey(types.TxKey) error { return errNotAllowed } + +// ReapMaxBytesMaxGas always returns nil. +func (*NopMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return nil } + +// ReapMaxTxs always returns nil. +func (*NopMempool) ReapMaxTxs(int) types.Txs { return nil } + +// Lock does nothing. +func (*NopMempool) Lock() {} + +// Unlock does nothing. +func (*NopMempool) Unlock() {} + +// Update does nothing. +func (*NopMempool) Update( + int64, + types.Txs, + []*abci.ExecTxResult, + PreCheckFunc, + PostCheckFunc, +) error { + return nil +} + +// FlushAppConn does nothing. +func (*NopMempool) FlushAppConn() error { return nil } + +// Flush does nothing. +func (*NopMempool) Flush() {} + +// TxsAvailable always returns nil. +func (*NopMempool) TxsAvailable() <-chan struct{} { + return nil +} + +// EnableTxsAvailable does nothing. +func (*NopMempool) EnableTxsAvailable() {} + +// SetTxRemovedCallback does nothing. +func (*NopMempool) SetTxRemovedCallback(func(txKey types.TxKey)) {} + +// Size always returns 0. +func (*NopMempool) Size() int { return 0 } + +// SizeBytes always returns 0. +func (*NopMempool) SizeBytes() int64 { return 0 } + +// NopMempoolReactor is a mempool reactor that does nothing. +type NopMempoolReactor struct { + service.BaseService +} + +// NewNopMempoolReactor returns a new `nop` reactor. +// +// To be used only in RPC. +func NewNopMempoolReactor() *NopMempoolReactor { + return &NopMempoolReactor{*service.NewBaseService(nil, "NopMempoolReactor", nil)} +} + +var _ p2p.Reactor = &NopMempoolReactor{} + +// GetChannels always returns nil. +func (*NopMempoolReactor) GetChannels() []*p2p.ChannelDescriptor { return nil } + +// AddPeer does nothing. +func (*NopMempoolReactor) AddPeer(p2p.Peer) {} + +// InitPeer always returns nil. +func (*NopMempoolReactor) InitPeer(p2p.Peer) p2p.Peer { return nil } + +// RemovePeer does nothing. +func (*NopMempoolReactor) RemovePeer(p2p.Peer, interface{}) {} + +// Receive does nothing. 
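ADR-111 pairs the nop mempool above with node-level selection of the mempool implementation. A hedged sketch of what that wiring could look like; the config constant MempoolTypeNop is an assumption here, not established by this diff:

func chooseMempool(conf *cfg.Config, clistMem *CListMempool) (Mempool, p2p.Reactor) {
    switch conf.Mempool.Type {
    case cfg.MempoolTypeNop: // assumed constant; see ADR-111
        // The app stores, gossips, and proposes txs; RPC still needs a reactor stub.
        return &NopMempool{}, NewNopMempoolReactor()
    default: // the clist ("flood") mempool with its gossiping reactor
        return clistMem, NewReactor(conf.Mempool, clistMem)
    }
}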
+func (*NopMempoolReactor) Receive(p2p.Envelope) {} + +// SetSwitch does nothing. +func (*NopMempoolReactor) SetSwitch(*p2p.Switch) {} diff --git a/mempool/nop_mempool_test.go b/mempool/nop_mempool_test.go new file mode 100644 index 00000000000..01b169e0695 --- /dev/null +++ b/mempool/nop_mempool_test.go @@ -0,0 +1,38 @@ +package mempool + +import ( + "testing" + + "github.com/cometbft/cometbft/types" + "github.com/stretchr/testify/assert" +) + +var tx = types.Tx([]byte{0x01}) + +func TestNopMempool_Basic(t *testing.T) { + mem := &NopMempool{} + + assert.Equal(t, 0, mem.Size()) + assert.Equal(t, int64(0), mem.SizeBytes()) + + err := mem.CheckTx(tx, nil, TxInfo{}) + assert.Equal(t, errNotAllowed, err) + + err = mem.RemoveTxByKey(tx.Key()) + assert.Equal(t, errNotAllowed, err) + + txs := mem.ReapMaxBytesMaxGas(0, 0) + assert.Nil(t, txs) + + txs = mem.ReapMaxTxs(0) + assert.Nil(t, txs) + + err = mem.FlushAppConn() + assert.NoError(t, err) + + err = mem.Update(0, nil, nil, nil, nil) + assert.NoError(t, err) + + txsAvailable := mem.TxsAvailable() + assert.Nil(t, txsAvailable) +} diff --git a/mempool/reactor.go b/mempool/reactor.go index 9306e1d0f1d..ef3e9a7c382 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -1,16 +1,19 @@ package mempool import ( + "context" "errors" - "fmt" "time" + "fmt" + cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/clist" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" protomem "github.com/cometbft/cometbft/proto/tendermint/mempool" "github.com/cometbft/cometbft/types" + "golang.org/x/sync/semaphore" ) // Reactor handles mempool tx broadcasting amongst peers. @@ -21,6 +24,12 @@ type Reactor struct { config *cfg.MempoolConfig mempool *CListMempool ids *mempoolIDs + + // Semaphores to keep track of how many connections to peers are active for broadcasting + // transactions. Each semaphore has a capacity that puts an upper bound on the number of + // connections for different groups of peers. + activePersistentPeersSemaphore *semaphore.Weighted + activeNonPersistentPeersSemaphore *semaphore.Weighted } // NewReactor returns a new Reactor with the given config and mempool. @@ -31,6 +40,9 @@ func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor { ids: newMempoolIDs(), } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) + memR.activePersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers)) + memR.activeNonPersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers)) + return memR } @@ -78,12 +90,47 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { // It starts a broadcast routine ensuring all txs are forwarded to the given peer. func (memR *Reactor) AddPeer(peer p2p.Peer) { if memR.config.Broadcast { - go memR.broadcastTxRoutine(peer) + go func() { + // Always forward transactions to unconditional peers. + if !memR.Switch.IsPeerUnconditional(peer.ID()) { + // Depending on the type of peer, we choose a semaphore to limit the gossiping peers. 
+ var peerSemaphore *semaphore.Weighted + if peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers > 0 { + peerSemaphore = memR.activePersistentPeersSemaphore + } else if !peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers > 0 { + peerSemaphore = memR.activeNonPersistentPeersSemaphore + } + + if peerSemaphore != nil { + for peer.IsRunning() { + // Block on the semaphore until a slot is available to start gossiping with this peer. + // Do not block indefinitely, in case the peer is disconnected before gossiping starts. + ctxTimeout, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + // Block sending transactions to peer until one of the connections become + // available in the semaphore. + err := peerSemaphore.Acquire(ctxTimeout, 1) + cancel() + + if err != nil { + continue + } + + // Release semaphore to allow other peer to start sending transactions. + defer peerSemaphore.Release(1) + break + } + } + } + + memR.mempool.metrics.ActiveOutboundConnections.Add(1) + defer memR.mempool.metrics.ActiveOutboundConnections.Add(-1) + memR.broadcastTxRoutine(peer) + }() } } // RemovePeer implements Reactor. -func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (memR *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { memR.ids.Reclaim(peer) // broadcast routine checks if peer is gone and returns } @@ -108,10 +155,16 @@ func (memR *Reactor) Receive(e p2p.Envelope) { for _, tx := range protoTxs { ntx := types.Tx(tx) err = memR.mempool.CheckTx(ntx, nil, txInfo) - if errors.Is(err, ErrTxInCache) { - memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) - } else if err != nil { - memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err) + if err != nil { + switch { + case errors.Is(err, ErrTxInCache): + memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) + case errors.As(err, &ErrMempoolIsFull{}): + // using debug level to avoid flooding when traffic is high + memR.Logger.Debug(err.Error()) + default: + memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err) + } } } default: @@ -138,6 +191,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { if !memR.IsRunning() || !peer.IsRunning() { return } + // This happens because the CElement we were looking at got garbage // collected (removed). That is, .NextWait() returned nil. Go ahead and // start from the beginning. @@ -176,7 +230,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // NOTE: Transaction batching was disabled due to // https://github.com/tendermint/tendermint/issues/5796 - if _, ok := memTx.senders.Load(peerID); !ok { + if !memTx.isSender(peerID) { success := peer.Send(p2p.Envelope{ ChannelID: MempoolChannel, Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 6d07e4a09b3..f79ed97d6a9 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -59,7 +59,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) { } } - txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID) + txs := addRandomTxs(t, reactors[0].mempool, numTxs, UnknownPeerID) waitForTxsOnReactors(t, txs, reactors) } @@ -89,7 +89,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. 
update the whole mempool
-	txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
+	txs := addRandomTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)

 	go func() {
 		defer wg.Done()
@@ -106,7 +106,7 @@ func TestReactorConcurrency(t *testing.T) {

 		// 1. submit a bunch of txs
 		// 2. update none
-		_ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)
+		_ = addRandomTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)

 		go func() {
 			defer wg.Done()
@@ -143,11 +143,11 @@ func TestReactorConcurrency(t *testing.T) {
 	}

 	const peerID = 1
-	checkTxs(t, reactors[0].mempool, numTxs, peerID)
+	addRandomTxs(t, reactors[0].mempool, numTxs, peerID)
 	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
 }

-func TestReactor_MaxTxBytes(t *testing.T) {
+func TestMempoolReactorMaxTxBytes(t *testing.T) {
 	config := cfg.TestConfig()

 	const N = 2
@@ -259,6 +259,51 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
 	}
 }

+// Test the experimental feature that limits the number of outgoing connections for gossiping
+// transactions (only non-persistent peers).
+// Note: in this test we know which gossip connections are active or not because of how the p2p
+// functions are currently implemented, which affects the order in which peers are added to the
+// mempool reactor.
+func TestMempoolReactorMaxActiveOutboundConnections(t *testing.T) {
+	config := cfg.TestConfig()
+	config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1
+	reactors, _ := makeAndConnectReactors(config, 4)
+	defer func() {
+		for _, r := range reactors {
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
+		}
+	}()
+	for _, r := range reactors {
+		for _, peer := range r.Switch.Peers().List() {
+			peer.Set(types.PeerStateKey, peerState{1})
+		}
+	}
+
+	// Add a bunch of transactions to the first reactor.
+	txs := newUniqueTxs(100)
+	callCheckTx(t, reactors[0].mempool, txs, UnknownPeerID)
+
+	// Wait for all txs to be in the mempool of the second reactor; the other reactors should not
+	// receive any tx. (The second reactor only sends transactions to the first reactor.)
+	checkTxsInMempool(t, txs, reactors[1], 0)
+	for _, r := range reactors[2:] {
+		require.Zero(t, r.mempool.Size())
+	}
+
+	// Disconnect the second reactor from the first reactor.
+	firstPeer := reactors[0].Switch.Peers().List()[0]
+	reactors[0].Switch.StopPeerGracefully(firstPeer)
+
+	// Now the third reactor should start receiving transactions from the first reactor; the fourth
+	// reactor's mempool should still be empty.
+	checkTxsInMempool(t, txs, reactors[2], 0)
+	for _, r := range reactors[3:] {
+		require.Zero(t, r.mempool.Size())
+	}
+}
+
 // mempoolLogger is a TestingLogger which uses a different
 // color for each validator ("validator" key must exist).
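The gossip-connection limit exercised by TestMempoolReactorMaxActiveOutboundConnections above reduces to a weighted-semaphore gate around each broadcast routine, as added to AddPeer earlier in this diff. A self-contained sketch of that gate using golang.org/x/sync/semaphore, simplified from the reactor:

package main

import (
    "context"
    "fmt"
    "sync"
    "time"

    "golang.org/x/sync/semaphore"
)

func main() {
    // One gossip slot, like ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1 in the test.
    sem := semaphore.NewWeighted(1)
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(peer int) {
            defer wg.Done()
            for {
                // Bounded wait, so a peer that disconnects meanwhile can bail out and retry.
                ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
                err := sem.Acquire(ctx, 1)
                cancel()
                if err != nil {
                    continue // timed out; the reactor re-checks peer.IsRunning() here
                }
                defer sem.Release(1) // free the slot when this routine ends
                break
            }
            fmt.Printf("peer %d gossiping\n", peer)
            time.Sleep(50 * time.Millisecond) // stand-in for broadcastTxRoutine
        }(i)
    }
    wg.Wait()
}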
func mempoolLogger() log.Logger { @@ -294,6 +339,14 @@ func makeAndConnectReactors(config *cfg.Config, n int) ([]*Reactor, []*p2p.Switc return reactors, switches } +func newUniqueTxs(n int) types.Txs { + txs := make(types.Txs, n) + for i := 0; i < n; i++ { + txs[i] = kvstore.NewTxFromID(i) + } + return txs +} + func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { // wait for the txs in all mempools wg := new(sync.WaitGroup) @@ -301,7 +354,7 @@ func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { wg.Add(1) go func(r *Reactor, reactorIndex int) { defer wg.Done() - waitForTxsOnReactor(t, txs, r, reactorIndex) + checkTxsInOrder(t, txs, r, reactorIndex) }(reactor, i) } @@ -319,13 +372,30 @@ func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { } } -func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { - mempool := reactor.mempool - for mempool.Size() < len(txs) { +// Wait until the mempool has a certain number of transactions. +func waitForNumTxsInMempool(numTxs int, mempool Mempool) { + for mempool.Size() < numTxs { time.Sleep(time.Millisecond * 100) } +} + +// Wait until all txs are in the mempool and check that the number of txs in the +// mempool is as expected. +func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) { + waitForNumTxsInMempool(len(txs), reactor.mempool) + + reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) + require.Equal(t, len(txs), len(reapedTxs)) + require.Equal(t, len(txs), reactor.mempool.Size()) +} + +// Wait until all txs are in the mempool and check that they are in the same +// order as given. +func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { + waitForNumTxsInMempool(len(txs), reactor.mempool) - reapedTxs := mempool.ReapMaxTxs(len(txs)) + // Check that all transactions in the mempool are in the same order as txs. + reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) for i, tx := range txs { assert.Equalf(t, tx, reapedTxs[i], "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) diff --git a/networks/local/Makefile b/networks/local/Makefile index c2d52334e96..6d96fe2f591 100644 --- a/networks/local/Makefile +++ b/networks/local/Makefile @@ -1,7 +1,7 @@ # Makefile for the "localnode" docker image. all: - docker build --tag cometbft/localnode localnode + docker buildx build --platform linux/amd64 --tag cometbft/localnode localnode .PHONY: all diff --git a/networks/local/README.md b/networks/local/README.md index ec6d857ac82..7159c45df91 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,3 +1,3 @@ # Local Cluster with Docker Compose -See the [docs](https://docs.cometbft.com/main/networks/docker-compose.html). +See the [docs](https://docs.cometbft.com/v0.38.x/networks/docker-compose.html). 
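The mempool reactor changes above gate each peer's broadcast goroutine behind a weighted semaphore from `golang.org/x/sync/semaphore`, acquiring with a timeout so a disconnected peer cannot block forever. The standalone sketch below illustrates that acquire-with-timeout/release pattern outside the reactor; the names (`maxGossipConns`, the peer loop) and the timings are illustrative, not CometBFT API.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	// Cap concurrent "gossip connections" at 1, as the test above does via
	// ExperimentalMaxGossipConnectionsToNonPersistentPeers.
	const maxGossipConns = 1
	sem := semaphore.NewWeighted(maxGossipConns)

	var wg sync.WaitGroup
	for peer := 0; peer < 3; peer++ {
		wg.Add(1)
		go func(peer int) {
			defer wg.Done()
			// Acquire with a timeout so this goroutine does not block
			// indefinitely if no slot frees up, mirroring the reactor's loop.
			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
			defer cancel()
			if err := sem.Acquire(ctx, 1); err != nil {
				fmt.Printf("peer %d: no gossip slot available: %v\n", peer, err)
				return
			}
			defer sem.Release(1)
			fmt.Printf("peer %d: gossiping\n", peer)
			time.Sleep(100 * time.Millisecond)
		}(peer)
	}
	wg.Wait()
}
```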
diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile index e1c3c452701..f1a93d5b9c7 100644 --- a/networks/local/localnode/Dockerfile +++ b/networks/local/localnode/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.7 +FROM alpine:3.19 RUN apk update && \ apk upgrade && \ diff --git a/node/node.go b/node/node.go index 7810b0c0997..4e6ac83de66 100644 --- a/node/node.go +++ b/node/node.go @@ -1,10 +1,12 @@ package node import ( + "bytes" "context" "fmt" "net" "net/http" + "os" "time" "github.com/prometheus/client_golang/prometheus" @@ -15,6 +17,7 @@ import ( cfg "github.com/cometbft/cometbft/config" cs "github.com/cometbft/cometbft/consensus" "github.com/cometbft/cometbft/evidence" + "github.com/cometbft/cometbft/light" "github.com/cometbft/cometbft/libs/log" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" @@ -62,6 +65,7 @@ type Node struct { stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor p2p.Reactor // for block-syncing + mempoolReactor p2p.Reactor // for gossiping transactions mempool mempl.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots @@ -69,6 +73,7 @@ type Node struct { stateSyncGenesis sm.State // provides the genesis state for state sync consensusState *cs.State // latest consensus state consensusReactor *cs.Reactor // for participating in the consensus + pexReactor *pex.Reactor // for exchanging peer addresses evidencePool *evidence.Pool // tracking evidence proxyApp proxy.AppConns // connection to the application rpcListeners []net.Listener // rpc servers @@ -131,6 +136,133 @@ func StateProvider(stateProvider statesync.StateProvider) Option { } } +// BootstrapState synchronizes the stores with the application after state sync +// has been performed offline. It is expected that the block store and state +// store are empty at the time the function is called. +// +// If the block store is not empty, the function returns an error. +func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBProvider, height uint64, appHash []byte) error { + return BootstrapStateWithGenProvider(ctx, config, dbProvider, DefaultGenesisDocProviderFunc(config), height, appHash) +} + +// BootstrapStateWithGenProvider synchronizes the stores with the application after state sync +// has been performed offline. It is expected that the block store and state +// store are empty at the time the function is called. +// +// If the block store is not empty, the function returns an error.
+func BootstrapStateWithGenProvider(ctx context.Context, config *cfg.Config, dbProvider cfg.DBProvider, genProvider GenesisDocProvider, height uint64, appHash []byte) (err error) { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + if ctx == nil { + ctx = context.Background() + } + + if config == nil { + logger.Info("no config provided, using default configuration") + config = cfg.DefaultConfig() + } + + if dbProvider == nil { + dbProvider = cfg.DefaultDBProvider + } + blockStore, stateDB, err := initDBs(config, dbProvider) + + defer func() { + if derr := blockStore.Close(); derr != nil { + logger.Error("Failed to close blockstore", "err", derr) + // Set the return value + err = derr + } + }() + + if err != nil { + return err + } + + if !blockStore.IsEmpty() { + return fmt.Errorf("blockstore not empty, trying to initialize non-empty state") + } + + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }) + + defer func() { + if derr := stateStore.Close(); derr != nil { + logger.Error("Failed to close statestore", "err", derr) + // Set the return value + err = derr + } + }() + state, err := stateStore.Load() + if err != nil { + return err + } + + if !state.IsEmpty() { + return fmt.Errorf("state not empty, trying to initialize non-empty state") + } + + genState, _, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genProvider) + if err != nil { + return err + } + + stateProvider, err := statesync.NewLightClientStateProvider( + ctx, + genState.ChainID, genState.Version, genState.InitialHeight, + config.StateSync.RPCServers, light.TrustOptions{ + Period: config.StateSync.TrustPeriod, + Height: config.StateSync.TrustHeight, + Hash: config.StateSync.TrustHashBytes(), + }, logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + + state, err = stateProvider.State(ctx, height) + if err != nil { + return err + } + if appHash == nil { + logger.Info("warning: cannot verify appHash. Verification will happen when node boots up!") + } else { + if !bytes.Equal(appHash, state.AppHash) { + if err := blockStore.Close(); err != nil { + logger.Error("failed to close blockstore", "err", err) + } + if err := stateStore.Close(); err != nil { + logger.Error("failed to close statestore", "err", err) + } + return fmt.Errorf("the app hash returned by the light client does not match the provided appHash, expected %X, got %X", state.AppHash, appHash) + } + } + + commit, err := stateProvider.Commit(ctx, height) + if err != nil { + return err + } + + if err = stateStore.Bootstrap(state); err != nil { + return err + } + + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + return err + } + + // Once the stores are bootstrapped, we need to set the height at which the node has finished + // statesyncing. This will allow the blocksync reactor to fetch blocks at a proper height. + // In case this operation fails, it is equivalent to a failure in online state sync where the operator + // needs to manually delete the state and blockstores and rerun the bootstrapping process. + err = stateStore.SetOfflineStateSyncHeight(state.LastBlockHeight) + if err != nil { + return fmt.Errorf("failed to set synced height: %w", err) + } + + return err +} + //------------------------------------------------------------------------------ // NewNode returns a new, ready to go, CometBFT Node.
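As a usage illustration for the new API above, a hypothetical operator-side snippet might call `BootstrapState` after restoring application state offline. Only `node.BootstrapState`, `cfg.DefaultConfig`, `cfg.DefaultDBProvider`, and the state-sync config fields appear in this diff; the home directory, RPC endpoints, trust parameters, height, and app hash below are placeholders an operator would supply.

```go
package main

import (
	"context"
	"log"

	cfg "github.com/cometbft/cometbft/config"
	"github.com/cometbft/cometbft/node"
)

func main() {
	config := cfg.DefaultConfig()
	config.SetRoot("/path/to/cometbft-home") // placeholder home directory

	// The light client used during bootstrapping needs RPC endpoints and
	// trust parameters; these values are placeholders.
	config.StateSync.RPCServers = []string{"http://rpc-1:26657", "http://rpc-2:26657"}
	config.StateSync.TrustHeight = 100
	config.StateSync.TrustHash = "0000000000000000000000000000000000000000000000000000000000000000"

	// Placeholder app hash for the bootstrap height, e.g. taken from the
	// snapshot the application state was restored from.
	appHash := make([]byte, 32)

	// Bootstrap the block store and state store so block sync starts at
	// height 100 instead of replaying from genesis.
	if err := node.BootstrapState(context.Background(), config, cfg.DefaultDBProvider, 100, appHash); err != nil {
		log.Fatal(err)
	}
}
```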
@@ -143,6 +275,23 @@ func NewNode(config *cfg.Config, metricsProvider MetricsProvider, logger log.Logger, options ...Option, +) (*Node, error) { + return NewNodeWithContext(context.TODO(), config, privValidator, + nodeKey, clientCreator, genesisDocProvider, dbProvider, + metricsProvider, logger, options...) +} + +// NewNodeWithContext is a cancellable version of NewNode. +func NewNodeWithContext(ctx context.Context, + config *cfg.Config, + privValidator types.PrivValidator, + nodeKey *p2p.NodeKey, + clientCreator proxy.ClientCreator, + genesisDocProvider GenesisDocProvider, + dbProvider cfg.DBProvider, + metricsProvider MetricsProvider, + logger log.Logger, + options ...Option, ) (*Node, error) { blockStore, stateDB, err := initDBs(config, dbProvider) if err != nil { @@ -195,9 +344,10 @@ func NewNode(config *cfg.Config, if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } + localAddr := pubKey.Address() // Determine whether we should attempt state sync. - stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, localAddr) if stateSync && state.LastBlockHeight > 0 { logger.Info("Found local state with non-zero height, skipping state sync") stateSync = false @@ -207,7 +357,7 @@ func NewNode(config *cfg.Config, // and replays any blocks as necessary to sync CometBFT with the app. consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { return nil, err } @@ -222,14 +372,12 @@ func NewNode(config *cfg.Config, // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. - blockSync := !onlyValidatorIsUs(state, pubKey) + blockSync := !onlyValidatorIsUs(state, localAddr) logNodeStartupInfo(state, pubKey, logger, consensusLogger) - // Make MempoolReactor mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) - // Make Evidence Reactor evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger) if err != nil { return nil, err @@ -246,18 +394,28 @@ func NewNode(config *cfg.Config, sm.BlockExecutorWithMetrics(smMetrics), ) - // Make BlocksyncReactor. Don't start block sync if we're doing a state sync first. - bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics) + offlineStateSyncHeight := int64(0) + if blockStore.Height() == 0 { + offlineStateSyncHeight, err = blockExec.Store().GetOfflineStateSyncHeight() + if err != nil && err.Error() != "value empty" { + panic(fmt.Sprintf("failed to retrieve statesynced height from store %s; expected state store height to be %v", err, state.LastBlockHeight)) + } + } + // Don't start block sync if we're doing a state sync first.
+ bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, localAddr, logger, bsMetrics, offlineStateSyncHeight) if err != nil { return nil, fmt.Errorf("could not create blocksync reactor: %w", err) } - // Make ConsensusReactor consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, - privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, + privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, offlineStateSyncHeight, ) + err = stateStore.SetOfflineStateSyncHeight(0) + if err != nil { + panic(fmt.Sprintf("failed to reset the offline state sync height %s", err)) + } // Set up state sync reactor, and schedule a sync if requested. // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: @@ -266,7 +424,6 @@ func NewNode(config *cfg.Config, *config.StateSync, proxyApp.Snapshot(), proxyApp.Query(), - config.StateSync.TempDir, ssMetrics, ) stateSyncReactor.SetLogger(logger.With("module", "statesync")) @@ -276,10 +433,8 @@ func NewNode(config *cfg.Config, return nil, err } - // Setup Transport. transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) - // Setup Switch. p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, @@ -301,17 +456,6 @@ func NewNode(config *cfg.Config, return nil, fmt.Errorf("could not create addrbook: %w", err) } - for _, addr := range splitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { - netAddrs, err := p2p.NewNetAddressString(addr) - if err != nil { - return nil, fmt.Errorf("invalid bootstrap peer address: %w", err) - } - err = addrBook.AddAddress(netAddrs, netAddrs) - if err != nil { - return nil, fmt.Errorf("adding bootstrap address to addressbook: %w", err) - } - } - // Optionally, start the pex reactor // // TODO: @@ -324,8 +468,9 @@ func NewNode(config *cfg.Config, // // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. 
// Note we currently use the addrBook regardless at least for AddOurAddress + var pexReactor *pex.Reactor if config.P2P.PexReactor { - createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } // Add private IDs to addrbook to block those peers being added @@ -345,12 +490,14 @@ func NewNode(config *cfg.Config, stateStore: stateStore, blockStore: blockStore, bcReactor: bcReactor, + mempoolReactor: mempoolReactor, mempool: mempool, consensusState: consensusState, consensusReactor: consensusReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state + pexReactor: pexReactor, evidencePool: evidencePool, proxyApp: proxyApp, txIndexer: txIndexer, @@ -425,7 +572,7 @@ func (n *Node) OnStart() error { if !ok { return fmt.Errorf("this blocksync reactor does not support switching from state sync") } - err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + err := startStateSync(n.stateSyncReactor, bcR, n.stateSyncProvider, n.config.StateSync, n.stateStore, n.blockStore, n.stateSyncGenesis) if err != nil { return fmt.Errorf("failed to start state sync: %w", err) @@ -445,10 +592,11 @@ func (n *Node) OnStop() { if err := n.eventBus.Stop(); err != nil { n.Logger.Error("Error closing eventBus", "err", err) } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) + if n.indexerService != nil { + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } } - // now stop the reactors if err := n.sw.Stop(); err != nil { n.Logger.Error("Error closing switch", "err", err) @@ -486,15 +634,23 @@ func (n *Node) OnStop() { } } if n.blockStore != nil { + n.Logger.Info("Closing blockstore") if err := n.blockStore.Close(); err != nil { n.Logger.Error("problem closing blockstore", "err", err) } } if n.stateStore != nil { + n.Logger.Info("Closing statestore") if err := n.stateStore.Close(); err != nil { n.Logger.Error("problem closing statestore", "err", err) } } + if n.evidencePool != nil { + n.Logger.Info("Closing evidencestore") + if err := n.EvidencePool().Close(); err != nil { + n.Logger.Error("problem closing evidencestore", "err", err) + } + } } // ConfigureRPC makes sure RPC has all the objects it needs to operate. @@ -546,6 +702,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { } config := rpcserver.DefaultConfig() + config.MaxRequestBatchSize = n.config.RPC.MaxRequestBatchSize config.MaxBodyBytes = n.config.RPC.MaxBodyBytes config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes config.MaxOpenConnections = n.config.RPC.MaxOpenConnections @@ -640,6 +797,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { return nil, err } go func() { + //nolint:staticcheck // SA1019: core_grpc.StartGRPCClient is deprecated: A new gRPC API will be introduced after v0.38. if err := grpccore.StartGRPCServer(env, listener); err != nil { n.Logger.Error("Error starting gRPC server", "err", err) } @@ -694,11 +852,36 @@ func (n *Node) Switch() *p2p.Switch { return n.sw } +// BlockStore returns the Node's BlockStore. +func (n *Node) BlockStore() *store.BlockStore { + return n.blockStore +} + +// ConsensusReactor returns the Node's ConsensusReactor. +func (n *Node) ConsensusReactor() *cs.Reactor { + return n.consensusReactor +} + +// MempoolReactor returns the Node's mempool reactor. 
+func (n *Node) MempoolReactor() p2p.Reactor { + return n.mempoolReactor +} + // Mempool returns the Node's mempool. func (n *Node) Mempool() mempl.Mempool { return n.mempool } +// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. +func (n *Node) PEXReactor() *pex.Reactor { + return n.pexReactor +} + +// EvidencePool returns the Node's EvidencePool. +func (n *Node) EvidencePool() *evidence.Pool { + return n.evidencePool +} + // EventBus returns the Node's EventBus. func (n *Node) EventBus() *types.EventBus { return n.eventBus @@ -715,6 +898,11 @@ func (n *Node) GenesisDoc() *types.GenesisDoc { return n.genesisDoc } +// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. +func (n *Node) ProxyApp() proxy.AppConns { + return n.proxyApp +} + // Config returns the Node's config. func (n *Node) Config() *cfg.Config { return n.config diff --git a/node/node_test.go b/node/node_test.go index 031ca6e8fe2..01d5f0f8fcc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -264,10 +264,12 @@ func TestCreateProposalBlock(t *testing.T) { stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) - maxBytes := 16384 - var partSize uint32 = 256 - maxEvidenceBytes := int64(maxBytes / 2) - state.ConsensusParams.Block.MaxBytes = int64(maxBytes) + var ( + partSize uint32 = 256 + maxBytes int64 = 16384 + ) + maxEvidenceBytes := maxBytes / 2 + state.ConsensusParams.Block.MaxBytes = maxBytes state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) @@ -305,7 +307,7 @@ func TestCreateProposalBlock(t *testing.T) { // fill the mempool with more txs // than can fit in a block txLength := 100 - for i := 0; i <= maxBytes/txLength; i++ { + for i := 0; i <= int(maxBytes)/txLength; i++ { tx := cmtrand.Bytes(txLength) err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) assert.NoError(t, err) @@ -333,7 +335,7 @@ func TestCreateProposalBlock(t *testing.T) { // check that the part set does not exceed the maximum block size partSet, err := block.MakePartSet(partSize) require.NoError(t, err) - assert.Less(t, partSet.ByteSize(), int64(maxBytes)) + assert.Less(t, partSet.ByteSize(), maxBytes) partSetFromHeader := types.NewPartSetFromHeader(partSet.Header()) for partSetFromHeader.Count() < partSetFromHeader.Total() { diff --git a/node/setup.go b/node/setup.go index c118d724bcb..5215b18c7e7 100644 --- a/node/setup.go +++ b/node/setup.go @@ -149,14 +149,20 @@ func createAndStartIndexerService( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer ) - txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID) + + txIndexer, blockIndexer, allIndexersDisabled, err := block.IndexerFromConfigWithDisabledIndexers(config, dbProvider, chainID) if err != nil { return nil, nil, nil, err } + if allIndexersDisabled { + return nil, txIndexer, blockIndexer, nil + } + + txIndexer.SetLogger(logger.With("module", "txindex")) + blockIndexer.SetLogger(logger.With("module", "txindex")) indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) indexerService.SetLogger(logger.With("module", "txindex")) - if err := indexerService.Start(); err != nil { return nil, nil, nil, err } @@ -165,6 +171,7 @@ func createAndStartIndexerService( } func doHandshake( + ctx context.Context, stateStore sm.Store, state sm.State, blockStore sm.BlockStore, @@ -176,7 +183,7 @@ func doHandshake( handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) 
handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { + if err := handshaker.HandshakeWithContext(ctx, proxyApp); err != nil { return fmt.Errorf("error during handshake: %v", err) } return nil @@ -209,14 +216,15 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL } } -func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { +func onlyValidatorIsUs(state sm.State, localAddr crypto.Address) bool { if state.Validators.Size() > 1 { return false } - addr, _ := state.Validators.GetByIndex(0) - return bytes.Equal(pubKey.Address(), addr) + valAddr, _ := state.Validators.GetByIndex(0) + return bytes.Equal(localAddr, valAddr) } +// createMempoolAndMempoolReactor creates a mempool and a mempool reactor based on the config. func createMempoolAndMempoolReactor( config *cfg.Config, proxyApp proxy.AppConns, @@ -224,28 +232,36 @@ func createMempoolAndMempoolReactor( memplMetrics *mempl.Metrics, logger log.Logger, ) (mempl.Mempool, p2p.Reactor) { - logger = logger.With("module", "mempool") - mp := mempl.NewCListMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempl.WithMetrics(memplMetrics), - mempl.WithPreCheck(sm.TxPreCheck(state)), - mempl.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) + switch config.Mempool.Type { + // allow empty string for backward compatibility + case cfg.MempoolTypeFlood, "": + logger = logger.With("module", "mempool") + mp := mempl.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempl.WithMetrics(memplMetrics), + mempl.WithPreCheck(sm.TxPreCheck(state)), + mempl.WithPostCheck(sm.TxPostCheck(state)), + ) + mp.SetLogger(logger) + reactor := mempl.NewReactor( + config.Mempool, + mp, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + reactor.SetLogger(logger) - reactor := mempl.NewReactor( - config.Mempool, - mp, - ) - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() + return mp, reactor + case cfg.MempoolTypeNop: + // Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but + // adding it leads to cleaner code. + return &mempl.NopMempool{}, mempl.NewNopMempoolReactor() + default: + panic(fmt.Sprintf("unknown mempool type: %q", config.Mempool.Type)) } - reactor.SetLogger(logger) - - return mp, reactor } func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider, @@ -270,12 +286,14 @@ func createBlocksyncReactor(config *cfg.Config, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, blockSync bool, + localAddr crypto.Address, logger log.Logger, metrics *blocksync.Metrics, + offlineStateSyncHeight int64, ) (bcReactor p2p.Reactor, err error) { switch config.BlockSync.Version { case "v0": - bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics) + bcReactor = blocksync.NewReactorWithAddr(state.Copy(), blockExec, blockStore, blockSync, localAddr, metrics, offlineStateSyncHeight) case "v1", "v2": return nil, fmt.Errorf("block sync version %s has been deprecated.
Please use v0", config.BlockSync.Version) default: @@ -297,6 +315,7 @@ func createConsensusReactor(config *cfg.Config, waitSync bool, eventBus *types.EventBus, consensusLogger log.Logger, + offlineStateSyncHeight int64, ) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, @@ -306,6 +325,7 @@ func createConsensusReactor(config *cfg.Config, mempool, evidencePool, cs.StateMetrics(csMetrics), + cs.OfflineStateSyncHeight(offlineStateSyncHeight), ) consensusState.SetLogger(consensusLogger) if privValidator != nil { @@ -408,7 +428,9 @@ func createSwitch(config *cfg.Config, p2p.SwitchPeerFilters(peerFilters...), ) sw.SetLogger(p2pLogger) - sw.AddReactor("MEMPOOL", mempoolReactor) + if config.Mempool.Type != cfg.MempoolTypeNop { + sw.AddReactor("MEMPOOL", mempoolReactor) + } sw.AddReactor("BLOCKSYNC", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) @@ -450,7 +472,7 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, sw *p2p.Switch, logger log.Logger, -) { +) *pex.Reactor { // TODO persistent peers ? so we can have their DNS addrs saved pexReactor := pex.NewReactor(addrBook, &pex.ReactorConfig{ @@ -466,12 +488,18 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, }) pexReactor.SetLogger(logger.With("module", "pex")) sw.AddReactor("PEX", pexReactor) + return pexReactor } // startStateSync starts an asynchronous state sync process, then switches to block sync mode. -func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor, - stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, - stateStore sm.Store, blockStore *store.BlockStore, state sm.State, +func startStateSync( + ssR *statesync.Reactor, + bcR blockSyncReactor, + stateProvider statesync.StateProvider, + config *cfg.StateSyncConfig, + stateStore sm.Store, + blockStore *store.BlockStore, + state sm.State, ) error { ssR.Logger.Info("Starting state sync") @@ -580,11 +608,7 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { if err != nil { return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) } - if err := db.SetSync(genesisDocKey, b); err != nil { - return err - } - - return nil + return db.SetSync(genesisDocKey, b) } func createAndStartPrivValidatorSocketClient( diff --git a/p2p/README.md b/p2p/README.md index 85bb404aa55..bdde71d201d 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -4,7 +4,7 @@ The p2p package provides an abstraction around peer-to-peer communication. 
Docs: -- [Connection](https://github.com/cometbft/cometbft/blob/main/spec/p2p/connection.md) for details on how connections and multiplexing work -- [Peer](https://github.com/cometbft/cometbft/blob/main/spec/p2p/node.md) for details on peer ID, handshakes, and peer exchange -- [Node](https://github.com/cometbft/cometbft/blob/main/spec/p2p/node.md) for details about different types of nodes and how they should work -- [Config](https://github.com/cometbft/cometbft/blob/main/spec/p2p/config.md) for details on some config option +- [Connection](../spec/p2p/legacy-docs/connection.md) for details on how connections and multiplexing work +- [Peer](../spec/p2p/legacy-docs/node.md) for details on peer ID, handshakes, and peer exchange +- [Node](../spec/p2p/legacy-docs/node.md) for details about different types of nodes and how they should work +- [Config](../spec/p2p/legacy-docs/config.md) for details on some config options diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index aaee128f99d..bfac2340847 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -60,8 +60,8 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor { func (br *BaseReactor) SetSwitch(sw *Switch) { br.Switch = sw } -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(e Envelope) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } +func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } +func (*BaseReactor) AddPeer(Peer) {} +func (*BaseReactor) RemovePeer(Peer, interface{}) {} +func (*BaseReactor) Receive(Envelope) {} +func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index cc2dcaf98d2..34eb66e9615 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -14,9 +14,9 @@ import ( "github.com/cosmos/gogoproto/proto" + "github.com/cometbft/cometbft/config" flow "github.com/cometbft/cometbft/libs/flowrate" "github.com/cometbft/cometbft/libs/log" - cmtmath "github.com/cometbft/cometbft/libs/math" "github.com/cometbft/cometbft/libs/protoio" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" @@ -47,8 +47,10 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) -type errorCbFunc func(interface{}) +type ( + receiveCbFunc func(chID byte, msgBytes []byte) + errorCbFunc func(interface{}) +) /* Each peer has one `MConnection` (multiplex connection) instance. @@ -134,6 +136,10 @@ type MConnConfig struct { // Maximum wait time for pongs PongTimeout time.Duration `mapstructure:"pong_timeout"` + + // Fuzz connection + TestFuzz bool `mapstructure:"test_fuzz"` + TestFuzzConfig *config.FuzzConnConfig `mapstructure:"test_fuzz_config"` } // DefaultMConnConfig returns the default config. @@ -190,8 +196,8 @@ func NewMConnectionWithConfig( } // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} + channelsIdx := map[byte]*Channel{} + channels := []*Channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -283,9 +289,10 @@ func (c *MConnection) FlushStop() { // Send and flush all pending msgs.
// Since sendRoutine has exited, we can call this // safely - eof := c.sendSomePacketMsgs() + w := protoio.NewDelimitedWriter(c.bufConnWriter) + eof := c.sendSomePacketMsgs(w) for !eof { - eof = c.sendSomePacketMsgs() + eof = c.sendSomePacketMsgs(w) } c.flush() @@ -474,7 +481,7 @@ FOR_LOOP: break FOR_LOOP case <-c.send: // Send some PacketMsgs - eof := c.sendSomePacketMsgs() + eof := c.sendSomePacketMsgs(protoWriter) if !eof { // Keep sendRoutine awake. select { @@ -501,56 +508,79 @@ FOR_LOOP: // Returns true if messages from channels were exhausted. // Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomePacketMsgs() bool { +func (c *MConnection) sendSomePacketMsgs(w protoio.Writer) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. - c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true) // Now send some PacketMsgs. - for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { + return c.sendBatchPacketMsgs(w, numBatchPacketMsgs) +} + +// Returns true if messages from channels were exhausted. +func (c *MConnection) sendBatchPacketMsgs(w protoio.Writer, batchSize int) bool { + // Send a batch of PacketMsgs. + totalBytesWritten := 0 + defer func() { + if totalBytesWritten > 0 { + c.sendMonitor.Update(totalBytesWritten) + } + }() + for i := 0; i < batchSize; i++ { + channel := selectChannelToGossipOn(c.channels) + // nothing to send across any channel. + if channel == nil { + return true + } + bytesWritten, err := c.sendPacketMsgOnChannel(w, channel) + if err { return true } + totalBytesWritten += bytesWritten } return false } -// Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { +// selects a channel to gossip our next message on. +// TODO: Make "batchChannelToGossipOn", so we can do our proto marshaling overheads in parallel, +// and we can avoid re-checking for `isSendPending`. +// We can easily mock the recentlySent differences for the batch choosing. +func selectChannelToGossipOn(channels []*Channel) *Channel { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 var leastChannel *Channel - for _, channel := range c.channels { + for _, channel := range channels { // If nothing to send, skip this channel + // TODO: Skip continually looking for isSendPending on channels we've already skipped in this batch-send. if !channel.isSendPending() { continue } // Get ratio, and keep track of lowest ratio. + // TODO: RecentlySent right now is bytes. This should be refactored to num messages to fix + // gossip prioritization bugs. ratio := float32(channel.recentlySent) / float32(channel.desc.Priority) if ratio < leastRatio { leastRatio = ratio leastChannel = channel } } + return leastChannel +} - // Nothing to send? - if leastChannel == nil { - return true - } - // c.Logger.Info("Found a msgPacket to send") - +// returns (num_bytes_written, error_occurred). 
+func (c *MConnection) sendPacketMsgOnChannel(w protoio.Writer, sendChannel *Channel) (int, bool) { // Make & send a PacketMsg from this channel - _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) + n, err := sendChannel.writePacketMsgTo(w) if err != nil { c.Logger.Error("Failed to write PacketMsg", "err", err) c.stopForError(err) - return true + return n, true } - c.sendMonitor.Update(_n) + // TODO: Change this to only add flush signals at the start and end of the batch. c.flushTimer.Set() - return false + return n, false } // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer. @@ -657,6 +687,7 @@ FOR_LOOP: // Cleanup close(c.pong) + //nolint:revive for range c.pong { // Drain } @@ -705,6 +736,7 @@ func (c *MConnection) Status() ConnectionStatus { status.RecvMonitor = c.recvMonitor.Status() status.Channels = make([]ChannelStatus, len(c.channels)) for i, channel := range c.channels { + channel := channel status.Channels[i] = ChannelStatus{ ID: channel.desc.ID, SendQueueCapacity: cap(channel.sendQueue), @@ -830,25 +862,29 @@ func (ch *Channel) isSendPending() bool { func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)} maxSize := ch.maxPacketMsgPayloadSize - packet.Data = ch.sending[:cmtmath.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { + packet.Data = ch.sending packet.EOF = true ch.sending = nil atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize } else { + packet.Data = ch.sending[:maxSize] packet.EOF = false - ch.sending = ch.sending[cmtmath.MinInt(maxSize, len(ch.sending)):] + ch.sending = ch.sending[maxSize:] } return packet } // Writes next PacketMsg to w and updates c.recentlySent. -// Not goroutine-safe -func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { +// Not goroutine-safe. +func (ch *Channel) writePacketMsgTo(w protoio.Writer) (n int, err error) { packet := ch.nextPacketMsg() - n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet)) + n, err = w.WriteMsg(mustWrapPacket(&packet)) + if err != nil { + return 0, err + } atomic.AddInt64(&ch.recentlySent, int64(n)) - return + return n, nil } // Handles incoming PacketMsgs. 
It returns a message bytes if message is @@ -856,7 +892,7 @@ func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { // Not goroutine-safe func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) - var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) + recvCap, recvReceived := ch.desc.RecvMessageCapacity, len(ch.recving)+len(packet.Data) if recvCap < recvReceived { return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) } diff --git a/p2p/conn/evil_secret_connection_test.go b/p2p/conn/evil_secret_connection_test.go index 6cb3a9b71db..5cbb5139e2f 100644 --- a/p2p/conn/evil_secret_connection_test.go +++ b/p2p/conn/evil_secret_connection_test.go @@ -221,12 +221,16 @@ func (c *evilConn) signChallenge() []byte { b := &buffer{} c.secretConn = &SecretConnection{ - conn: b, - recvBuffer: nil, - recvNonce: new([aeadNonceSize]byte), - sendNonce: new([aeadNonceSize]byte), - recvAead: recvAead, - sendAead: sendAead, + conn: b, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + recvFrame: make([]byte, totalFrameSize), + recvSealedFrame: make([]byte, totalFrameSize+aeadSizeOverhead), + sendFrame: make([]byte, totalFrameSize), + sendSealedFrame: make([]byte, totalFrameSize+aeadSizeOverhead), } c.buffer = b diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 942220240b4..cdafefcd2b3 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -14,7 +14,6 @@ import ( "time" gogotypes "github.com/cosmos/gogoproto/types" - pool "github.com/libp2p/go-buffer-pool" "github.com/oasisprotocol/curve25519-voi/primitives/merlin" "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/curve25519" @@ -76,12 +75,16 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx cmtsync.Mutex - recvBuffer []byte - recvNonce *[aeadNonceSize]byte - - sendMtx cmtsync.Mutex - sendNonce *[aeadNonceSize]byte + recvMtx cmtsync.Mutex + recvBuffer []byte + recvNonce *[aeadNonceSize]byte + recvFrame []byte + recvSealedFrame []byte + + sendMtx cmtsync.Mutex + sendNonce *[aeadNonceSize]byte + sendFrame []byte + sendSealedFrame []byte } // MakeSecretConnection performs handshake and returns a new authenticated @@ -144,12 +147,16 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* } sc := &SecretConnection{ - conn: conn, - recvBuffer: nil, - recvNonce: new([aeadNonceSize]byte), - sendNonce: new([aeadNonceSize]byte), - recvAead: recvAead, - sendAead: sendAead, + conn: conn, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + recvFrame: make([]byte, totalFrameSize), + recvSealedFrame: make([]byte, aeadSizeOverhead+totalFrameSize), + sendFrame: make([]byte, totalFrameSize), + sendSealedFrame: make([]byte, aeadSizeOverhead+totalFrameSize), } // Sign the challenge bytes for authentication. 
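The SecretConnection changes above drop the per-call `pool.Get`/`pool.Put` buffers in favor of four frames allocated once per connection, which is safe because the existing send/receive mutexes already serialize access. The following is a minimal sketch of that buffer-reuse pattern with simplified stand-in types, not the real SecretConnection:

```go
package main

import (
	"fmt"
	"sync"
)

// Stand-ins for the real constants in p2p/conn.
const (
	totalFrameSize   = 1024
	aeadSizeOverhead = 16
)

// secretConn is a simplified stand-in showing only the send path: the frames
// are allocated once per connection and reused on every Write, with the same
// mutex that already serializes writes making the reuse safe.
type secretConn struct {
	sendMtx         sync.Mutex
	sendFrame       []byte
	sendSealedFrame []byte
}

func newSecretConn() *secretConn {
	return &secretConn{
		sendFrame:       make([]byte, totalFrameSize),
		sendSealedFrame: make([]byte, totalFrameSize+aeadSizeOverhead),
	}
}

func (c *secretConn) Write(data []byte) (int, error) {
	c.sendMtx.Lock()
	defer c.sendMtx.Unlock()
	n := copy(c.sendFrame, data) // the real code marshals a length-prefixed chunk here
	// ... seal c.sendFrame into c.sendSealedFrame and write it to the wire ...
	return n, nil
}

func main() {
	c := newSecretConn()
	n, _ := c.Write([]byte("hello"))
	fmt.Println("wrote", n, "bytes")
}
```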
@@ -187,15 +194,10 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKey { func (sc *SecretConnection) Write(data []byte) (n int, err error) { sc.sendMtx.Lock() defer sc.sendMtx.Unlock() + sealedFrame, frame := sc.sendSealedFrame, sc.sendFrame for 0 < len(data) { if err := func() error { - var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) - var frame = pool.Get(totalFrameSize) - defer func() { - pool.Put(sealedFrame) - pool.Put(frame) - }() var chunk []byte if dataMaxSize < len(data) { chunk = data[:dataMaxSize] @@ -235,21 +237,19 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { if 0 < len(sc.recvBuffer) { n = copy(data, sc.recvBuffer) sc.recvBuffer = sc.recvBuffer[n:] - return + return n, err } // read off the conn - var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) - defer pool.Put(sealedFrame) + sealedFrame := sc.recvSealedFrame _, err = io.ReadFull(sc.conn, sealedFrame) if err != nil { - return + return n, err } // decrypt the frame. // reads and updates the sc.recvNonce - var frame = pool.Get(totalFrameSize) - defer pool.Put(frame) + frame := sc.recvFrame _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil) if err != nil { return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) @@ -324,7 +324,7 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return remEphPub, err } // Otherwise: @@ -437,7 +437,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return recvMsg, err } var _recvMsg = trs.FirstValue().(authSigMessage) diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 516302c35b9..5b5655e3d5f 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -129,7 +129,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { return nil, true, err } // In parallel, handle some reads and writes. 
- var trs, ok = async.Parallel( + trs, ok := async.Parallel( func(_ int) (interface{}, bool, error) { // Node writes: for _, nodeWrite := range nodeWrites { @@ -182,7 +182,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { } // Run foo & bar in parallel - var trs, ok = async.Parallel( + trs, ok := async.Parallel( genNodeRunner("foo", fooConn, fooWrites, &fooReads), genNodeRunner("bar", barConn, barWrites, &barReads), ) @@ -194,9 +194,9 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read = "" - var write = writes[0] - var readCount = 0 + read := "" + write := writes[0] + readCount := 0 for _, readChunk := range reads { read += readChunk readCount++ @@ -229,7 +229,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - err := cmtos.WriteFile(goldenFilepath, []byte(data), 0644) + err := cmtos.WriteFile(goldenFilepath, []byte(data), 0o644) require.NoError(t, err) } f, err := os.Open(goldenFilepath) @@ -259,11 +259,11 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { } func TestNilPubkey(t *testing.T) { - var fooConn, barConn = makeKVStoreConnPair() + fooConn, barConn := makeKVStoreConnPair() defer fooConn.Close() defer barConn.Close() - var fooPrvKey = ed25519.GenPrivKey() - var barPrvKey = privKeyWithNilPubKey{ed25519.GenPrivKey()} + fooPrvKey := ed25519.GenPrivKey() + barPrvKey := privKeyWithNilPubKey{ed25519.GenPrivKey()} go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests @@ -273,11 +273,11 @@ func TestNilPubkey(t *testing.T) { } func TestNonEd25519Pubkey(t *testing.T) { - var fooConn, barConn = makeKVStoreConnPair() + fooConn, barConn := makeKVStoreConnPair() defer fooConn.Close() defer barConn.Close() - var fooPrvKey = ed25519.GenPrivKey() - var barPrvKey = sr25519.GenPrivKey() + fooPrvKey := ed25519.GenPrivKey() + barPrvKey := sr25519.GenPrivKey() go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests @@ -309,7 +309,7 @@ func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { // Creates the data for a test vector file. // The file format is: // Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge) -func createGoldenTestVectors(t *testing.T) string { +func createGoldenTestVectors(*testing.T) string { data := "" for i := 0; i < 32; i++ { randSecretVector := cmtrand.Bytes(32) @@ -342,7 +342,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection ) // Make connections from both sides in parallel. 
- var trs, ok = async.Parallel( + trs, ok := async.Parallel( func(_ int) (val interface{}, abort bool, err error) { fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) if err != nil { diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index 3e137af89f8..b4111004c81 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -43,8 +43,8 @@ func NewPeer(ip net.IP) *Peer { } func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(e p2p.Envelope) bool { return true } -func (mp *Peer) Send(e p2p.Envelope) bool { return true } +func (mp *Peer) TrySend(_ p2p.Envelope) bool { return true } +func (mp *Peer) Send(_ p2p.Envelope) bool { return true } func (mp *Peer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{ DefaultNodeID: mp.addr.ID, @@ -61,6 +61,7 @@ func (mp *Peer) Get(key string) interface{} { } return nil } + func (mp *Peer) Set(key string, value interface{}) { mp.kv[key] = value } diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index adc0b2113ed..64d93a97358 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -19,7 +19,7 @@ func NewReactor() *Reactor { return r } -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(e p2p.Envelope) {} +func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } +func (r *Reactor) AddPeer(_ p2p.Peer) {} +func (r *Reactor) RemovePeer(_ p2p.Peer, _ interface{}) {} +func (r *Reactor) Receive(_ p2p.Envelope) {} diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index 235b0e976fb..590dbfb9961 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -22,6 +22,10 @@ type Peer struct { func (_m *Peer) CloseConn() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CloseConn") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -41,6 +45,10 @@ func (_m *Peer) FlushStop() { func (_m *Peer) Get(_a0 string) interface{} { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 interface{} if rf, ok := ret.Get(0).(func(string) interface{}); ok { r0 = rf(_a0) @@ -57,6 +65,10 @@ func (_m *Peer) Get(_a0 string) interface{} { func (_m *Peer) GetRemovalFailed() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetRemovalFailed") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -71,6 +83,10 @@ func (_m *Peer) GetRemovalFailed() bool { func (_m *Peer) ID() p2p.ID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 p2p.ID if rf, ok := ret.Get(0).(func() p2p.ID); ok { r0 = rf() @@ -85,6 +101,10 @@ func (_m *Peer) ID() p2p.ID { func (_m *Peer) IsOutbound() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsOutbound") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -99,6 +119,10 @@ func (_m *Peer) IsOutbound() bool { func (_m *Peer) IsPersistent() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsPersistent") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -113,6 +137,10 @@ func (_m *Peer) IsPersistent() bool { func (_m *Peer) IsRunning() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsRunning") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok 
{ r0 = rf() @@ -127,6 +155,10 @@ func (_m *Peer) IsRunning() bool { func (_m *Peer) NodeInfo() p2p.NodeInfo { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NodeInfo") + } + var r0 p2p.NodeInfo if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { r0 = rf() @@ -143,6 +175,10 @@ func (_m *Peer) NodeInfo() p2p.NodeInfo { func (_m *Peer) OnReset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnReset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -157,6 +193,10 @@ func (_m *Peer) OnReset() error { func (_m *Peer) OnStart() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnStart") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -176,6 +216,10 @@ func (_m *Peer) OnStop() { func (_m *Peer) Quit() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Quit") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -192,6 +236,10 @@ func (_m *Peer) Quit() <-chan struct{} { func (_m *Peer) RemoteAddr() net.Addr { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RemoteAddr") + } + var r0 net.Addr if rf, ok := ret.Get(0).(func() net.Addr); ok { r0 = rf() @@ -208,6 +256,10 @@ func (_m *Peer) RemoteAddr() net.Addr { func (_m *Peer) RemoteIP() net.IP { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RemoteIP") + } + var r0 net.IP if rf, ok := ret.Get(0).(func() net.IP); ok { r0 = rf() @@ -224,6 +276,10 @@ func (_m *Peer) RemoteIP() net.IP { func (_m *Peer) Reset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -238,6 +294,10 @@ func (_m *Peer) Reset() error { func (_m *Peer) Send(_a0 p2p.Envelope) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 bool if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { r0 = rf(_a0) @@ -267,6 +327,10 @@ func (_m *Peer) SetRemovalFailed() { func (_m *Peer) SocketAddr() *p2p.NetAddress { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SocketAddr") + } + var r0 *p2p.NetAddress if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { r0 = rf() @@ -283,6 +347,10 @@ func (_m *Peer) SocketAddr() *p2p.NetAddress { func (_m *Peer) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -297,6 +365,10 @@ func (_m *Peer) Start() error { func (_m *Peer) Status() conn.ConnectionStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 conn.ConnectionStatus if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { r0 = rf() @@ -311,6 +383,10 @@ func (_m *Peer) Status() conn.ConnectionStatus { func (_m *Peer) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -325,6 +401,10 @@ func (_m *Peer) Stop() error { func (_m *Peer) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -339,6 +419,10 @@ func (_m *Peer) String() string { func (_m 
*Peer) TrySend(_a0 p2p.Envelope) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for TrySend") + } + var r0 bool if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { r0 = rf(_a0) @@ -349,13 +433,12 @@ func (_m *Peer) TrySend(_a0 p2p.Envelope) bool { return r0 } -type mockConstructorTestingTNewPeer interface { +// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeer(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeer(t mockConstructorTestingTNewPeer) *Peer { +}) *Peer { mock := &Peer{} mock.Mock.Test(t) diff --git a/p2p/netaddress.go b/p2p/netaddress.go index fef9afd165d..252178be3a3 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -45,11 +45,11 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress { if !ok { if flag.Lookup("test.v") == nil { // normal run panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) - } else { // in testing - netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0) - netAddr.ID = id - return netAddr } + // in testing + netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0) + netAddr.ID = id + return netAddr } if err := validateID(id); err != nil { diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 9d08e437c74..64911ecebff 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -19,8 +19,8 @@ type mockPeer struct { } func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(e Envelope) bool { return true } -func (mp *mockPeer) Send(e Envelope) bool { return true } +func (mp *mockPeer) TrySend(Envelope) bool { return true } +func (mp *mockPeer) Send(Envelope) bool { return true } func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } func (mp *mockPeer) ID() ID { return mp.id } diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 780c2bad582..216932f8519 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -52,8 +52,7 @@ const ( defaultBanTime = 24 * time.Hour ) -type errMaxAttemptsToDial struct { -} +type errMaxAttemptsToDial struct{} func (e errMaxAttemptsToDial) Error() string { return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) @@ -84,6 +83,7 @@ type Reactor struct { book AddrBook config *ReactorConfig + ensurePeersCh chan struct{} // Wakes up ensurePeersRoutine() ensurePeersPeriod time.Duration // TODO: should go in the config // maps to prevent abuse @@ -132,6 +132,7 @@ func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { r := &Reactor{ book: b, config: config, + ensurePeersCh: make(chan struct{}), ensurePeersPeriod: defaultEnsurePeersPeriod, requestsSent: cmap.NewCMap(), lastReceivedRequests: cmap.NewCMap(), @@ -216,7 +217,7 @@ func (r *Reactor) AddPeer(p Peer) { } // RemovePeer implements Reactor by resetting peer's requests info. 
-func (r *Reactor) RemovePeer(p Peer, reason interface{}) { +func (r *Reactor) RemovePeer(p Peer, _ interface{}) { id := string(p.ID()) r.requestsSent.Delete(id) r.lastReceivedRequests.Delete(id) @@ -362,14 +363,6 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { return err } - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - for _, netAddr := range addrs { // NOTE: we check netAddr validity and routability in book#AddAddress. err = r.book.AddAddress(netAddr, srcAddr) @@ -379,21 +372,16 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // peer here too? continue } + } - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Debug(err.Error(), "addr", addr) - } - } - }(netAddr) + // Try to connect to addresses coming from a seed node without waiting (#2093) + for _, seedAddr := range r.seedAddrs { + if seedAddr.Equals(srcAddr) { + select { + case r.ensurePeersCh <- struct{}{}: + default: + } + break } } @@ -438,6 +426,8 @@ func (r *Reactor) ensurePeersRoutine() { select { case <-ticker.C: r.ensurePeers() + case <-r.ensurePeersCh: + r.ensurePeers() case <-r.Quit(): ticker.Stop() return diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 5c2bebf6649..0a25c70581c 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -19,9 +19,7 @@ import ( tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -81,7 +79,7 @@ func TestPEXReactorRunning(t *testing.T) { // create switches for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) sw.SetAddrBook(books[i]) @@ -224,8 +222,10 @@ func TestCheckSeeds(t *testing.T) { // 4. test create peer with all seeds having unresolvable DNS fails badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Error(t, peerSwitch.Start()) @@ -233,9 +233,11 @@ func TestCheckSeeds(t *testing.T) { // 5. 
test create peer with one good seed address succeeds badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, + seed.NetAddress().String(), + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Nil(t, peerSwitch.Start()) @@ -268,27 +270,48 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { require.Nil(t, err) defer os.RemoveAll(dir) - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - defer peerSwitch.Stop() //nolint:errcheck // ignore for tests + // Default is 10, we need one connection for the seed node. + cfg.MaxNumOutboundPeers = 2 - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - defer seed.Stop() //nolint:errcheck // ignore for tests + var id int + var knownAddrs []*p2p.NetAddress - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - defer secondPeer.Stop() //nolint:errcheck // ignore for tests + // 1. Create some peers + for id = 0; id < cfg.MaxNumOutboundPeers+1; id++ { + peer := testCreateDefaultPeer(dir, id) + require.NoError(t, peer.Start()) + addr := peer.NetAddress() + defer peer.Stop() //nolint:errcheck // ignore for tests - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) + knownAddrs = append(knownAddrs, addr) + t.Log("Created peer", id, addr) + } - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) + // 2. Create seed node which knows about the previous peers + seed := testCreateSeed(dir, id, knownAddrs, knownAddrs) + require.NoError(t, seed.Start()) + defer seed.Stop() //nolint:errcheck // ignore for tests + t.Log("Created seed", id, seed.NetAddress()) + + // 3. Create a node with only seed configured. + id++ + node := testCreatePeerWithSeed(dir, id, seed) + require.NoError(t, node.Start()) + defer node.Stop() //nolint:errcheck // ignore for tests + t.Log("Created node", id, node.NetAddress()) + + // 4. Check that the node connects to seed immediately + assertPeersWithTimeout(t, []*p2p.Switch{node}, 10*time.Millisecond, 3*time.Second, 1) + + // 5. Check that the node connects to the peers reported by the seed node + assertPeersWithTimeout(t, []*p2p.Switch{node}, 10*time.Millisecond, 1*time.Second, cfg.MaxNumOutboundPeers) + + // 6. 
Assert that the configured maximum number of inbound/outbound peers + // are respected, see https://github.com/cometbft/cometbft/issues/486 + outbound, inbound, dialing := node.NumPeers() + assert.LessOrEqual(t, inbound, cfg.MaxNumInboundPeers) + assert.LessOrEqual(t, outbound, cfg.MaxNumOutboundPeers) + assert.Zero(t, dialing) } func TestPEXReactorSeedMode(t *testing.T) { @@ -415,7 +438,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { // create switches for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) sw.SetAddrBook(books[i]) @@ -582,20 +605,16 @@ func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Sw peer := p2p.MakeSwitch( cfg, id, - "127.0.0.1", - "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) + book.SetLogger(log.TestingLogger().With("book", id)) sw.SetAddrBook(book) - sw.SetLogger(log.TestingLogger()) - r := NewReactor( book, config, ) - r.SetLogger(log.TestingLogger()) + r.SetLogger(log.TestingLogger().With("pex", id)) sw.AddReactor("pex", r) return sw }, @@ -614,8 +633,6 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) seed := p2p.MakeSwitch( cfg, id, - "127.0.0.1", - "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) book.SetLogger(log.TestingLogger()) @@ -668,7 +685,7 @@ func teardownReactor(book AddrBook) { } func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) + sw := p2p.MakeSwitch(cfg, 0, func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetLogger(log.TestingLogger()) for _, r := range reactors { sw.AddReactor(r.String(), r) @@ -678,7 +695,6 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } func TestPexVectors(t *testing.T) { - addr := tmp2p.NetAddress{ ID: "1", IP: "127.0.0.1", diff --git a/p2p/switch.go b/p2p/switch.go index f7159f9d41e..68ad5669b3e 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -39,6 +39,8 @@ func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { mConfig.SendRate = cfg.SendRate mConfig.RecvRate = cfg.RecvRate mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize + mConfig.TestFuzz = cfg.TestFuzz + mConfig.TestFuzzConfig = cfg.TestFuzzConfig return mConfig } @@ -161,7 +163,7 @@ func WithMetrics(metrics *Metrics) SwitchOption { // AddReactor adds the given reactor to the switch. // NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) { +func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { for _, chDesc := range reactor.GetChannels() { chID := chDesc.ID // No two reactors can share the same channel. @@ -174,6 +176,7 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) { } sw.reactors[name] = reactor reactor.SetSwitch(sw) + return reactor } // RemoveReactor removes the given Reactor from the Switch. 
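With `AddReactor` now returning the `Reactor` it was given, call sites can register a reactor and keep a typed handle in a single expression. A minimal, self-contained sketch of that register-and-return shape (the types below are illustrative stand-ins, not the actual `p2p` types):

```go
package main

import "fmt"

// Reactor and Switch are simplified stand-ins for the p2p types; only the
// register-and-return shape of the new AddReactor signature is shown.
type Reactor interface{ Name() string }

type pexReactor struct{}

func (pexReactor) Name() string { return "pex" }

type Switch struct{ reactors map[string]Reactor }

// AddReactor registers the reactor under name and returns it, so callers can
// chain registration with further wiring.
func (sw *Switch) AddReactor(name string, r Reactor) Reactor {
	sw.reactors[name] = r
	return r
}

func main() {
	sw := &Switch{reactors: make(map[string]Reactor)}
	r := sw.AddReactor("pex", pexReactor{}) // register and keep the handle
	fmt.Println("registered:", r.Name())
}
```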
diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 4dce6fa105d..ad4040760fa 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -27,9 +27,7 @@ import ( p2pproto "github.com/cometbft/cometbft/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -67,9 +65,9 @@ func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { return tr.channels } -func (tr *TestReactor) AddPeer(peer Peer) {} +func (tr *TestReactor) AddPeer(Peer) {} -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} +func (tr *TestReactor) RemovePeer(Peer, interface{}) {} func (tr *TestReactor) Receive(e Envelope) { if tr.logMessages { @@ -91,16 +89,17 @@ func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { // convenience method for creating two switches connected to each other. // XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { +func MakeSwitchPair(initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { // Create two switches that will be interconnected. switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) return switches[0], switches[1] } -func initSwitchFunc(i int, sw *Switch) *Switch { +func initSwitchFunc(_ int, sw *Switch) *Switch { sw.SetAddrBook(&AddrBookMock{ Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) + OurAddrs: make(map[string]struct{}), + }) // Make two reactors of two channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ @@ -116,7 +115,7 @@ func initSwitchFunc(i int, sw *Switch) *Switch { } func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) + s1, s2 := MakeSwitchPair(initSwitchFunc) t.Cleanup(func() { if err := s1.Stop(); err != nil { t.Error(err) @@ -205,7 +204,7 @@ func assertMsgReceivedWithTimeout( } func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) + s1 := MakeSwitch(cfg, 1, initSwitchFunc) // simulate s1 having a public IP by creating a remote peer with the same ID rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} @@ -241,8 +240,6 @@ func TestSwitchPeerFilter(t *testing.T) { sw = MakeSwitch( cfg, 1, - "testing", - "123.123.123", initSwitchFunc, SwitchPeerFilters(filters...), ) @@ -291,8 +288,6 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { sw = MakeSwitch( cfg, 1, - "testing", - "123.123.123", initSwitchFunc, SwitchFilterTimeout(5*time.Millisecond), SwitchPeerFilters(filters...), @@ -328,7 +323,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { } func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -376,7 +371,7 @@ func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { assert, require := assert.New(t), require.New(t) - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() if err != nil { t.Error(err) @@ -436,7 +431,7 @@ func TestSwitchStopPeerForError(t *testing.T) { p2pMetrics := PrometheusMetrics(namespace) // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + sw1, sw2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { // set 
metrics on sw1 if i == 0 { opt := WithMetrics(p2pMetrics) @@ -471,7 +466,7 @@ func TestSwitchStopPeerForError(t *testing.T) { } func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -521,7 +516,7 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { } func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -554,7 +549,7 @@ func TestSwitchDialPeersAsync(t *testing.T) { return } - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -620,7 +615,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { } // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) require.NoError(t, err) err = sw.Start() @@ -702,12 +697,14 @@ func (et errorTransport) NetAddress() NetAddress { panic("not implemented") } -func (et errorTransport) Accept(c peerConfig) (Peer, error) { +func (et errorTransport) Accept(peerConfig) (Peer, error) { return nil, et.acceptErr } + func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { panic("not implemented") } + func (errorTransport) Cleanup(Peer) { panic("not implemented") } @@ -749,7 +746,7 @@ type mockReactor struct { initCalledBeforeRemoveFinished uint32 } -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { +func (r *mockReactor) RemovePeer(Peer, interface{}) { atomic.StoreUint32(&r.removePeerInProgress, 1) defer atomic.StoreUint32(&r.removePeerInProgress, 0) time.Sleep(100 * time.Millisecond) @@ -774,7 +771,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { + sw := MakeSwitch(cfg, 1, func(i int, sw *Switch) *Switch { sw.AddReactor("mock", reactor) return sw }) @@ -813,7 +810,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { } func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { + s1, s2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ {ID: byte(0x00), Priority: 10}, @@ -862,8 +859,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) { } func TestSwitchRemovalErr(t *testing.T) { - - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + sw1, sw2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { return initSwitchFunc(i, sw) }) assert.Equal(t, len(sw1.Peers().List()), 1) diff --git a/p2p/test_util.go b/p2p/test_util.go index 2941c102d7c..3fbb68bb655 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -23,10 +23,10 @@ type mockNodeInfo struct { addr *NetAddress } -func (ni mockNodeInfo) ID() ID { return ni.addr.ID } -func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } -func (ni mockNodeInfo) Validate() error { return nil } -func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } +func (ni mockNodeInfo) ID() 
ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } +func (ni mockNodeInfo) Validate() error { return nil } +func (ni mockNodeInfo) CompatibleWith(NodeInfo) error { return nil } func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { sw.peers.Add(peer) //nolint:errcheck // ignore error @@ -83,7 +83,7 @@ func MakeConnectedSwitches(cfg *config.P2PConfig, ) []*Switch { switches := make([]*Switch, n) for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch) + switches[i] = MakeSwitch(cfg, i, initSwitch) } if err := StartSwitches(switches); err != nil { @@ -178,11 +178,9 @@ func StartSwitches(switches []*Switch) error { func MakeSwitch( cfg *config.P2PConfig, i int, - network, version string, initSwitch func(int, *Switch) *Switch, opts ...SwitchOption, ) *Switch { - nodeKey := NodeKey{ PrivKey: ed25519.GenPrivKey(), } @@ -291,7 +289,7 @@ type AddrBookMock struct { var _ AddrBook = (*AddrBookMock)(nil) -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { +func (book *AddrBookMock) AddAddress(addr *NetAddress, _ *NetAddress) error { book.Addrs[addr.String()] = struct{}{} return nil } @@ -305,6 +303,7 @@ func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { _, ok := book.Addrs[addr.String()] return ok } + func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { delete(book.Addrs, addr.String()) } diff --git a/p2p/transport.go b/p2p/transport.go index 96d3738d662..d6043da3beb 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -218,6 +218,11 @@ func (mt *MultiplexTransport) Dial( return nil, err } + if mt.mConfig.TestFuzz { + // so we have time to do peer handshakes and get set up. + c = FuzzConnAfterFromConfig(c, 10*time.Second, mt.mConfig.TestFuzzConfig) + } + // TODO(xla): Evaluate if we should apply filters if we explicitly dial. if err := mt.filterConn(c); err != nil { return nil, err diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go deleted file mode 100644 index b40d92e65ad..00000000000 --- a/p2p/upnp/probe.go +++ /dev/null @@ -1,117 +0,0 @@ -package upnp - -import ( - "fmt" - "net" - "time" - - "github.com/cometbft/cometbft/libs/log" -) - -type Capabilities struct { - PortMapping bool - Hairpin bool -} - -func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { - nat, err := Discover() - if err != nil { - return nil, nil, nil, fmt.Errorf("nat upnp could not be discovered: %v", err) - } - logger.Info("make upnp listener", "msg", log.NewLazySprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) - - ext, err := nat.GetExternalAddress() - if err != nil { - return nat, nil, nil, fmt.Errorf("external address error: %v", err) - } - logger.Info("make upnp listener", "msg", log.NewLazySprintf("External address: %v", ext)) - - port, err := nat.AddPortMapping("tcp", extPort, intPort, "CometBFT UPnP Probe", 0) - if err != nil { - return nat, nil, ext, fmt.Errorf("port mapping error: %v", err) - } - logger.Info("make upnp listener", "msg", log.NewLazySprintf("Port mapping mapped: %v", port)) - - // also run the listener, open for all remote addresses. 
- listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) - if err != nil { - return nat, nil, ext, fmt.Errorf("error establishing listener: %v", err) - } - return nat, listener, ext, nil -} - -func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) { - // Listener - go func() { - inConn, err := listener.Accept() - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Listener.Accept() error: %v", err)) - return - } - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) - buf := make([]byte, 1024) - n, err := inConn.Read(buf) - if err != nil { - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Incoming connection read error: %v", err)) - return - } - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Incoming connection read %v bytes: %X", n, buf)) - if string(buf) == "test data" { - supportsHairpin = true - return - } - }() - - // Establish outgoing - outConn, err := net.Dial("tcp", extAddr) - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection dial error: %v", err)) - return - } - - n, err := outConn.Write([]byte("test data")) - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection write error: %v", err)) - return - } - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection wrote %v bytes", n)) - - // Wait for data receipt - time.Sleep(1 * time.Second) - return supportsHairpin -} - -func Probe(logger log.Logger) (caps Capabilities, err error) { - logger.Info("Probing for UPnP!") - - intPort, extPort := 8001, 8001 - - nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger) - if err != nil { - return - } - caps.PortMapping = true - - // Deferred cleanup - defer func() { - if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { - logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) - } - if err := listener.Close(); err != nil { - logger.Error(fmt.Sprintf("Listener closing error: %v", err)) - } - }() - - supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger) - if supportsHairpin { - caps.Hairpin = true - } - - return -} diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go deleted file mode 100644 index 45da9d33cb8..00000000000 --- a/p2p/upnp/upnp.go +++ /dev/null @@ -1,404 +0,0 @@ -// Taken from taipei-torrent. 
-// Just enough UPnP to be able to forward ports -// For more information, see: http://www.upnp-hacks.org/upnp.html -package upnp - -// TODO: use syscalls to get actual ourIP, see issue #712 - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { - return nil, err - } - - st := "InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - _, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - return - } - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, st) { - continue - } - // HTTP header field names are case-insensitive. 
- // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation:" - answer = strings.ToLower(answer) - locIndex := strings.Index(answer, locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := strings.TrimSpace(loc[0:endIndex]) - var serviceURL, urnDomain string - serviceURL, urnDomain, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP net.IP - ourIP, err = localIPv4() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} - return - } - } - err = errors.New("upnp port discovery failed") - return nat, err -} - -type Envelope struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` - Soap *SoapBody -} -type SoapBody struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` - ExternalIP *ExternalIPAddressResponse -} - -type ExternalIPAddressResponse struct { - XMLName xml.Name `xml:"GetExternalIPAddressResponse"` - IPAddress string `xml:"NewExternalIPAddress"` -} - -type ExternalIPAddress struct { - XMLName xml.Name `xml:"NewExternalIPAddress"` - IP string -} - -type Service struct { - ServiceType string `xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -type DeviceList struct { - Device []Device `xml:"device"` -} - -type ServiceList struct { - Service []Service `xml:"service"` -} - -type Device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList DeviceList `xml:"deviceList"` - ServiceList ServiceList `xml:"serviceList"` -} - -type Root struct { - Device Device -} - -func getChildDevice(d *Device, deviceType string) *Device { - dl := d.DeviceList.Device - for i := 0; i < len(dl); i++ { - if strings.Contains(dl[i].DeviceType, deviceType) { - return &dl[i] - } - } - return nil -} - -func getChildService(d *Device, serviceType string) *Service { - sl := d.ServiceList.Service - for i := 0; i < len(sl); i++ { - if strings.Contains(sl[i].ServiceType, serviceType) { - return &sl[i] - } - } - return nil -} - -func localIPv4() (net.IP, error) { - tt, err := net.Interfaces() - if err != nil { - return nil, err - } - for _, t := range tt { - aa, err := t.Addrs() - if err != nil { - return nil, err - } - for _, a := range aa { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { // loopback address - continue - } - return v4, nil - } - } - return nil, errors.New("cannot find local IP address") -} - -func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) //nolint: gosec - if err != nil { - return - } - defer r.Body.Close() - - if r.StatusCode >= 400 { - err = errors.New(string(rune(r.StatusCode))) - return - } - var root Root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") { - err = errors.New("no InternetGatewayDevice") - return - } - b := getChildDevice(a, "WANDevice:1") - if b == nil { - err = errors.New("no WANDevice") - return - } - c := getChildDevice(b, "WANConnectionDevice:1") - if c == nil { - err = errors.New("no WANConnectionDevice") - return - } - d := getChildService(c, "WANIPConnection:1") - if d == nil { - // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice, - // instead of under WanConnectionDevice - d = 
getChildService(b, "WANIPConnection:1") - - if d == nil { - err = errors.New("no WANIPConnection") - return - } - } - // Extract the domain name, which isn't always 'schemas-upnp-org' - urnDomain = strings.Split(d.ServiceType, ":")[1] - url = combineURL(rootURL, d.ControlURL) - return url, urnDomain, err -} - -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -func soapRequest(url, function, message, domain string) (r *http.Response, err error) { - fullMessage := "" + - "\r\n" + - "" + message + "" - - req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") - req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - // req.Header.Set("Transfer-Encoding", "chunked") - req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") - req.Header.Set("Connection", "Close") - req.Header.Set("Cache-Control", "no-cache") - req.Header.Set("Pragma", "no-cache") - - // log.Stderr("soapRequest ", req) - - r, err = http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - /*if r.Body != nil { - defer r.Body.Close() - }*/ - - if r.StatusCode >= 400 { - // log.Stderr(function, r.StatusCode) - err = errors.New("error " + strconv.Itoa(r.StatusCode) + " for " + function) - r = nil - return - } - return r, err -} - -type statusInfo struct { - externalIPAddress string -} - -func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - - message := "\r\n" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - var envelope Envelope - data, err := io.ReadAll(response.Body) - if err != nil { - return - } - reader := bytes.NewReader(data) - err = xml.NewDecoder(reader).Decode(&envelope) - if err != nil { - return - } - - info = statusInfo{envelope.Soap.ExternalIP.IPAddress} - - if err != nil { - return - } - - return info, err -} - -// GetExternalAddress returns an external IP. If GetExternalIPAddress action -// fails or IP returned is invalid, GetExternalAddress returns an error. -func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { - info, err := n.getExternalIPAddress() - if err != nil { - return - } - addr = net.ParseIP(info.externalIPAddress) - if addr == nil { - err = fmt.Errorf("failed to parse IP: %v", info.externalIPAddress) - } - return -} - -func (n *upnpNAT) AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) { - // A single concatenation would break ARM compilation. 
- message := "\r\n" + - "" + strconv.Itoa(externalPort) - message += "" + protocol + "" - message += "" + strconv.Itoa(internalPort) + "" + - "" + n.ourIP + "" + - "1" - message += description + - "" + strconv.Itoa(timeout) + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := io.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return mappedExternalPort, err -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "\r\n" + - "" + strconv.Itoa(externalPort) + - "" + protocol + "" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index a7a4fbd2a03..81e69a5002f 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -389,8 +389,7 @@ func TestSignerSignVoteErrors(t *testing.T) { } } -func brokenHandler(privVal types.PrivValidator, request privvalproto.Message, - chainID string) (privvalproto.Message, error) { +func brokenHandler(_ types.PrivValidator, request privvalproto.Message, _ string) (privvalproto.Message, error) { var res privvalproto.Message var err error diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index 2b4abe2dd96..84dbde8f355 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -53,11 +53,9 @@ func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net. // TryGetConnection retrieves a connection if it is already available func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error { - se.connMtx.Lock() - defer se.connMtx.Unlock() - select { - case se.conn = <-connectionAvailableCh: + case conn := <-connectionAvailableCh: + se.SetConnection(conn) case <-time.After(maxWait): return ErrConnectionTimeout } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 9b6b033cc5f..8e7f4235638 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -3,6 +3,7 @@ package privval import ( "fmt" "net" + "sync/atomic" "time" "github.com/cometbft/cometbft/libs/log" @@ -34,9 +35,10 @@ type SignerListenerEndpoint struct { connectRequestCh chan struct{} connectionAvailableCh chan net.Conn - timeoutAccept time.Duration - pingTimer *time.Ticker - pingInterval time.Duration + timeoutAccept time.Duration + acceptFailCount atomic.Uint32 + pingTimer *time.Ticker + pingInterval time.Duration instanceMtx cmtsync.Mutex // Ensures instance public methods access, i.e. SendRequest } @@ -64,10 +66,10 @@ func NewSignerListenerEndpoint( // OnStart implements service.Service. func (sl *SignerListenerEndpoint) OnStart() error { - sl.connectRequestCh = make(chan struct{}) + sl.connectRequestCh = make(chan struct{}, 1) // Buffer of 1 to allow `serviceLoop` to re-trigger itself. 
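The buffer of 1 on `connectRequestCh` pairs with a non-blocking send: a wake-up can be queued even while `serviceLoop` is busy, and further triggers coalesce instead of blocking. The PEX reactor's new `ensurePeersCh` above uses the same select-with-default idiom on an unbuffered channel, where a trigger is simply dropped unless `ensurePeersRoutine` is already waiting. A self-contained sketch of the pattern (`trigger` is an illustrative name; `triggerConnect`, whose body is not shown in this diff, is assumed to do a send of this shape):

```go
package main

import "fmt"

// trigger performs a non-blocking send on a signalling channel. With a buffer
// of 1, at most one wake-up stays pending and extra triggers coalesce; with no
// buffer, a trigger is dropped unless the consumer is already blocked on a
// receive.
func trigger(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default: // a wake-up is already pending (or nobody is listening); drop it
	}
}

func main() {
	ch := make(chan struct{}, 1)
	trigger(ch)
	trigger(ch) // coalesced: the single buffer slot is already full
	<-ch
	fmt.Println("wake-ups still pending after one receive:", len(ch)) // 0
}
```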
 	sl.connectionAvailableCh = make(chan net.Conn)
 
-	// NOTE: ping timeout must be less than read/write timeout
+	// NOTE: ping timeout must be less than read/write timeout.
 	sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond
 	sl.pingTimer = time.NewTicker(sl.pingInterval)
 
@@ -159,9 +161,11 @@ func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) {
 	sl.Logger.Info("SignerListener: Listening for new connection")
 	conn, err := sl.listener.Accept()
 	if err != nil {
+		sl.acceptFailCount.Add(1)
 		return nil, err
 	}
 
+	sl.acceptFailCount.Store(0)
 	return conn, nil
 }
 
@@ -181,23 +185,27 @@ func (sl *SignerListenerEndpoint) serviceLoop() {
 	for {
 		select {
 		case <-sl.connectRequestCh:
-			{
-				conn, err := sl.acceptNewConnection()
-				if err == nil {
-					sl.Logger.Info("SignerListener: Connected")
-
-					// We have a good connection, wait for someone that needs one otherwise cancellation
-					select {
-					case sl.connectionAvailableCh <- conn:
-					case <-sl.Quit():
-						return
-					}
-				}
+			// On start, listen timeouts can queue a duplicate connect request
+			// while the first request connects. Drop the duplicate request.
+			if sl.IsConnected() {
+				sl.Logger.Debug("SignerListener: Connected. Drop Listen Request")
+				continue
+			}
 
-				select {
-				case sl.connectRequestCh <- struct{}{}:
-				default:
-				}
+			// Listen for remote signer
+			conn, err := sl.acceptNewConnection()
+			if err != nil {
+				sl.Logger.Error("SignerListener: Error accepting connection", "err", err, "failures", sl.acceptFailCount.Load())
+				sl.triggerConnect()
+				continue
+			}
+
+			// We have a good connection, wait for someone that needs one otherwise cancellation
+			sl.Logger.Info("SignerListener: Connected")
+			select {
+			case sl.connectionAvailableCh <- conn:
+			case <-sl.Quit():
+				return
 			}
 		case <-sl.Quit():
 			return
diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go
index c4e4c6b247e..c0f62b0ff24 100644
--- a/privval/signer_listener_endpoint_test.go
+++ b/privval/signer_listener_endpoint_test.go
@@ -1,6 +1,7 @@
 package privval
 
 import (
+	"errors"
 	"net"
 	"testing"
 	"time"
@@ -145,6 +146,89 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
 	}
 }
 
+func TestDuplicateListenReject(t *testing.T) {
+	for _, tc := range getDialerTestCases(t) {
+		var (
+			logger           = log.TestingLogger()
+			chainID          = cmtrand.Str(12)
+			mockPV           = types.NewMockPV()
+			endpointIsOpenCh = make(chan struct{})
+			thisConnTimeout  = testTimeoutReadWrite
+			listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout)
+		)
+		listenerEndpoint.timeoutAccept = defaultTimeoutAcceptSeconds / 2 * time.Second
+
+		dialerEndpoint := NewSignerDialerEndpoint(
+			logger,
+			tc.dialer,
+		)
+		SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint)
+		SignerDialerEndpointConnRetries(10)(dialerEndpoint)
+
+		signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV)
+
+		startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh)
+		t.Cleanup(func() {
+			if err := listenerEndpoint.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
+
+		require.NoError(t, signerServer.Start())
+		assert.True(t, signerServer.IsRunning())
+
+		<-endpointIsOpenCh
+		if err := signerServer.Stop(); err != nil {
+			t.Error(err)
+		}
+
+		dialerEndpoint2 := NewSignerDialerEndpoint(
+			logger,
+			tc.dialer,
+		)
+		signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV)
+
+		// let some pings pass
+		require.NoError(t, signerServer2.Start())
+		assert.True(t, signerServer2.IsRunning())
+
+		// wait for successful connection
+ for { + if listenerEndpoint.IsConnected() { + break + } + } + + // simulate ensureConnection, bypass triggerConnect default drop with multiple messages + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + + // simulate validator node running long enough for privval listen timeout multiple times + // up to 1 timeout error is possible due to timing differences + // Run 3 times longer than timeout to generate at least 2 accept errors + time.Sleep(3 * defaultTimeoutAcceptSeconds * time.Second) + t.Cleanup(func() { + if err := signerServer2.Stop(); err != nil { + t.Error(err) + } + }) + + // after connect, there should not be more than 1 accept fail + assert.LessOrEqual(t, listenerEndpoint.acceptFailCount.Load(), uint32(1)) + + // give the client some time to re-establish the conn to the remote signer + // should see sth like this in the logs: + // + // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" + // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal + time.Sleep(testTimeoutReadWrite * 2) + } +} + func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { proto, address := cmtnet.ProtocolAndAddress(addr) @@ -213,3 +297,28 @@ func getMockEndpoints( return listenerEndpoint, dialerEndpoint } + +func TestSignerListenerEndpointServiceLoop(t *testing.T) { + listenerEndpoint := NewSignerListenerEndpoint( + log.TestingLogger(), + &testListener{initialErrs: 5}, + ) + + require.NoError(t, listenerEndpoint.Start()) + require.NoError(t, listenerEndpoint.WaitForConnection(time.Second)) +} + +type testListener struct { + net.Listener + initialErrs int +} + +func (l *testListener) Accept() (net.Conn, error) { + if l.initialErrs > 0 { + l.initialErrs-- + + return nil, errors.New("accept error") + } + + return nil, nil // Note this doesn't actually return a valid connection, it just doesn't error. +} diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 28d94300d0e..27a6e65f97e 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -104,12 +104,16 @@ func TestListenerTimeoutReadWrite(t *testing.T) { // Note: this controls how long this test actually runs. timeoutReadWrite = 10 * time.Millisecond ) + for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { - _, err := dialer() + conn, err := dialer() if err != nil { panic(err) } + // Add a delay before closing the connection + time.Sleep(2 * timeoutReadWrite) + conn.Close() }(tc.dialer) c, err := tc.listener.Accept() diff --git a/proto/README.md b/proto/README.md index fcce452a244..f5fa29e8ad4 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,41 +1,56 @@ -# Protocol Buffers - -This sections defines the types and messages shared across implementations. The -definition of the data structures are located in the -[core/data\_structures](../spec/core/data_structures.md) for the core data types -and ABCI definitions are located in the [ABCI](../spec/abci/README.md) section. - -## Process of Updates - -The `.proto` files within this section are core to the protocol and updates must -be treated as such. - -### Steps - -1. Make an issue with the proposed change. Within in the issue members from - both the CometBFT and tendermint-rs team will leave comments. 
If there is not
-   consensus on the change an [RFC](../docs/rfc/README.md) may be requested.
-   1. Submission of an RFC as a pull request should be made to facilitate
-      further discussion.
-   2. Merge the RFC.
-2. Make the necessary changes to the `.proto` file(s), [core data
-   structures](../spec/core/data_structures.md) and/or [ABCI
-   protocol](../spec/abci).
-3. Open issues within CometBFT and Tendermint-rs repos. This is used to notify
-   the teams that a change occurred in the spec.
-   1. Tag the issue with a spec version label. This will notify the team the
-      changed has been made on master but has not entered a release.
-
-### Versioning
-
-The spec repo aims to be versioned. Once it has been versioned, updates to the
-protobuf files will live on master. After a certain amount of time, decided on
-by CometBFT and tendermint-rs team leads, a release will be made on the spec
-repo. The spec may contain minor releases as well, depending on the
-implementation these changes may lead to a breaking change. If so, the
-implementation team should open an issue within the spec repo requiring a major
-release of the spec.
-
-If the steps above were followed each implementation should have issues tagged
-with a spec change label. Once all issues have been completed the team should
-signify their readiness for release.
+
+# CometBFT v0.38.x Protocol Buffers Definitions
+
+This is the set of [Protobuf][protobuf] definitions of types used by various
+parts of [CometBFT]:
+
+- The [Application Blockchain Interface][abci] (ABCI), especially in the context
+  of _remote_ applications.
+- The P2P layer, in how CometBFT nodes interact with each other over the
+  network.
+- In interaction with remote signers ("privval").
+- The RPC, in that the native JSON serialization of certain Protobuf types is
+  used when accepting and responding to RPC requests.
+- The storage layer, in how data is serialized to and deserialized from on-disk
+  storage.
+
+The canonical Protobuf definitions live in the `proto` folder of the relevant
+release branch of CometBFT. These definitions are published to the [Buf
+registry][buf] for integrators' convenience.
+
+## Why does CometBFT use `tendermint` Protobuf definitions?
+
+This is a result of CometBFT being a fork of [Tendermint Core][tmcore], and of
+the desire to provide integrators with as painless a way as possible of
+transitioning from Tendermint Core to CometBFT.
+
+As of CometBFT v1, however, the project will transition to using and providing a
+`cometbft` package of Protobuf definitions (see [\#1330]).
+
+## How are `tendermint` Protobuf definitions versioned?
+
+At present, the canonical source of Protobuf definitions for all CometBFT v0.x
+releases is on each respective release branch. Each respective release's
+Protobuf definitions are also, for convenience, published to a corresponding
+branch in the `tendermint/tendermint` Buf repository.
+ +| CometBFT version | Canonical Protobufs | Buf registry | +|------------------|---------------------------------------------|-------------------------------------------| +| v0.38.x | [v0.38.x Protobuf definitions][v038-protos] | [Buf repository v0.38.x branch][v038-buf] | +| v0.37.x | [v0.37.x Protobuf definitions][v037-protos] | [Buf repository v0.37.x branch][v037-buf] | +| v0.34.x | [v0.34.x Protobuf definitions][v034-protos] | [Buf repository v0.34.x branch][v034-buf] | + +[protobuf]: https://protobuf.dev/ +[CometBFT]: https://github.com/cometbft/cometbft +[abci]: https://github.com/cometbft/cometbft/tree/main/spec/abci +[buf]: https://buf.build/tendermint/tendermint +[tmcore]: https://github.com/tendermint/tendermint +[\#1330]: https://github.com/cometbft/cometbft/issues/1330 +[v034-protos]: https://github.com/cometbft/cometbft/tree/v0.34.x/proto +[v034-buf]: https://buf.build/tendermint/tendermint/docs/v0.34.x +[v037-protos]: https://github.com/cometbft/cometbft/tree/v0.37.x/proto +[v037-buf]: https://buf.build/tendermint/tendermint/docs/v0.37.x +[v038-protos]: https://github.com/cometbft/cometbft/tree/v0.38.x/proto +[v038-buf]: https://buf.build/tendermint/tendermint/docs/v0.38.x diff --git a/proto/buf.lock b/proto/buf.lock index f2b69369858..51b78ffe35a 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -4,4 +4,5 @@ deps: - remote: buf.build owner: cosmos repository: gogo-proto - commit: 6652e3443c3b4504bb3bf82e73a7e409 + commit: 5e5b9fdd01804356895f8f79a6f1ddc1 + digest: shake256:0b85da49e2e5f9ebc4806eae058e2f56096ff3b1c59d1fb7c190413dd15f45dd456f0b69ced9059341c80795d2b6c943de15b120a9e0308b499e43e4b5fc2952 diff --git a/proto/buf.yaml b/proto/buf.yaml index c6e0660f147..a646c2030a7 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -1,4 +1,5 @@ version: v1 +name: buf.build/tendermint/tendermint deps: - buf.build/cosmos/gogo-proto breaking: diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 73151984ff9..89bafb6cd54 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -156,10 +156,18 @@ message RequestProcessProposal { // Extends a vote with application-injected data message RequestExtendVote { - // the hash of the block that this vote may be referring to - bytes hash = 1; + // the hash of the block that this vote may be referring to + bytes hash = 1; // the height of the extended vote int64 height = 2; + // info of the block that this vote may be referring to + google.protobuf.Timestamp time = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + repeated bytes txs = 4; + CommitInfo proposed_last_commit = 5 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 6 [(gogoproto.nullable) = false]; + bytes next_validators_hash = 7; + // address of the public key of the original proposer of the block. 
+ bytes proposer_address = 8; } // Verify the vote extension @@ -402,8 +410,8 @@ message ExecTxResult { bytes data = 2; string log = 3; // nondeterministic string info = 4; // nondeterministic - int64 gas_wanted = 5; - int64 gas_used = 6; + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic string codespace = 8; diff --git a/proto/tendermint/rpc/grpc/types.pb.go b/proto/tendermint/rpc/grpc/types.pb.go index 393c7394764..c27afb4e246 100644 --- a/proto/tendermint/rpc/grpc/types.pb.go +++ b/proto/tendermint/rpc/grpc/types.pb.go @@ -329,6 +329,7 @@ func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.rpc.grpc.BroadcastAPI", HandlerType: (*BroadcastAPIServer)(nil), diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto index b557aad8354..68ff0cad71b 100644 --- a/proto/tendermint/rpc/grpc/types.proto +++ b/proto/tendermint/rpc/grpc/types.proto @@ -26,6 +26,10 @@ message ResponseBroadcastTx { //---------------------------------------- // Service Definition +// BroadcastAPI +// +// Deprecated: This API will be superseded by a more comprehensive gRPC-based +// broadcast API, and is scheduled for removal after v0.38. service BroadcastAPI { rpc Ping(RequestPing) returns (ResponsePing); rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); diff --git a/proxy/client.go b/proxy/client.go index 798a67fe7c2..97f10673327 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -29,9 +29,9 @@ type localClientCreator struct { // NewLocalClientCreator returns a [ClientCreator] for the given app, which // will be running locally. // -// Maintains a single mutex over all new clients created with NewABCIClient. -// For a local client creator that uses a single mutex per new client, rather -// use [NewUnsyncLocalClientCreator]. +// Maintains a single mutex over all new clients created with NewABCIClient. For +// a local client creator that uses a single mutex per new client, rather use +// [NewConnSyncLocalClientCreator]. func NewLocalClientCreator(app types.Application) ClientCreator { return &localClientCreator{ mtx: new(cmtsync.Mutex), @@ -46,20 +46,24 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { //---------------------------------------------------- // local proxy creates a new mutex for each client -type unsyncLocalClientCreator struct { +type connSyncLocalClientCreator struct { app types.Application } -// NewUnsyncLocalClientCreator returns a [ClientCreator] for the given app. -// Unlike [NewLocalClientCreator], each call to NewABCIClient returns an ABCI -// client that maintains its own mutex over the application. -func NewUnsyncLocalClientCreator(app types.Application) ClientCreator { - return &unsyncLocalClientCreator{ +// NewConnSyncLocalClientCreator returns a local [ClientCreator] for the given +// app. +// +// Unlike [NewLocalClientCreator], this is a "connection-synchronized" local +// client creator, meaning each call to NewABCIClient returns an ABCI client +// that maintains its own mutex over the application (i.e. it is +// per-"connection" synchronized). 
+func NewConnSyncLocalClientCreator(app types.Application) ClientCreator { + return &connSyncLocalClientCreator{ app: app, } } -func (c *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) { +func (c *connSyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) { // Specifying nil for the mutex causes each instance to create its own // mutex. return abcicli.NewLocalClient(nil, c.app), nil @@ -101,30 +105,30 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { // Otherwise a remote client will be created. // // Each of "kvstore", "persistent_kvstore" and "e2e" also currently have an -// "_unsync" variant (i.e. "kvstore_unsync", etc.), which attempts to replicate -// the same concurrency model as the remote client. +// "_connsync" variant (i.e. "kvstore_connsync", etc.), which attempts to +// replicate the same concurrency model as the remote client. func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { switch addr { case "kvstore": return NewLocalClientCreator(kvstore.NewInMemoryApplication()) - case "kvstore_unsync": - return NewUnsyncLocalClientCreator(kvstore.NewInMemoryApplication()) + case "kvstore_connsync": + return NewConnSyncLocalClientCreator(kvstore.NewInMemoryApplication()) case "persistent_kvstore": return NewLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) - case "persistent_kvstore_unsync": - return NewUnsyncLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) + case "persistent_kvstore_connsync": + return NewConnSyncLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { panic(err) } return NewLocalClientCreator(app) - case "e2e_unsync": + case "e2e_connsync": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { panic(err) } - return NewUnsyncLocalClientCreator(app) + return NewConnSyncLocalClientCreator(app) case "noop": return NewLocalClientCreator(types.NewBaseApplication()) default: diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index 7dee0b1c5ce..801cbaf079d 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -19,6 +19,10 @@ type AppConnConsensus struct { func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.ResponseCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*types.ResponseCommit, error)); ok { @@ -45,6 +49,10 @@ func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, func (_m *AppConnConsensus) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -59,6 +67,10 @@ func (_m *AppConnConsensus) Error() error { func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + var r0 *types.ResponseExtendVote var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { @@ -85,6 +97,10 @@ func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 *types.RequestEx func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) 
(*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 *types.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { @@ -111,6 +127,10 @@ func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 *types.Reques func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + var r0 *types.ResponseInitChain var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { @@ -137,6 +157,10 @@ func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 *types.RequestIni func (_m *AppConnConsensus) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } + var r0 *types.ResponsePrepareProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { @@ -163,6 +187,10 @@ func (_m *AppConnConsensus) PrepareProposal(_a0 context.Context, _a1 *types.Requ func (_m *AppConnConsensus) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 *types.ResponseProcessProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { @@ -189,6 +217,10 @@ func (_m *AppConnConsensus) ProcessProposal(_a0 context.Context, _a1 *types.Requ func (_m *AppConnConsensus) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 *types.ResponseVerifyVoteExtension var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { @@ -211,13 +243,12 @@ func (_m *AppConnConsensus) VerifyVoteExtension(_a0 context.Context, _a1 *types. return r0, r1 } -type mockConstructorTestingTNewAppConnConsensus interface { +// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAppConnConsensus(t interface { mock.TestingT Cleanup(func()) -} - -// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAppConnConsensus(t mockConstructorTestingTNewAppConnConsensus) *AppConnConsensus { +}) *AppConnConsensus { mock := &AppConnConsensus{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 281ff21c9f9..7735e4a9beb 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -21,6 +21,10 @@ type AppConnMempool struct { func (_m *AppConnMempool) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *types.ResponseCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { @@ -47,6 +51,10 @@ func (_m *AppConnMempool) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTxAsync") + } + var r0 *abcicli.ReqRes var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*abcicli.ReqRes, error)); ok { @@ -73,6 +81,10 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCh func (_m *AppConnMempool) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -87,6 +99,10 @@ func (_m *AppConnMempool) Error() error { func (_m *AppConnMempool) Flush(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Flush") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -102,13 +118,12 @@ func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } -type mockConstructorTestingTNewAppConnMempool interface { +// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAppConnMempool(t interface { mock.TestingT Cleanup(func()) -} - -// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAppConnMempool(t mockConstructorTestingTNewAppConnMempool) *AppConnMempool { +}) *AppConnMempool { mock := &AppConnMempool{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index b10838ac01e..a19634bebd8 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -19,6 +19,10 @@ type AppConnQuery struct { func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Echo") + } + var r0 *types.ResponseEcho var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*types.ResponseEcho, error)); ok { @@ -45,6 +49,10 @@ func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEc func (_m *AppConnQuery) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -59,6 +67,10 @@ func (_m *AppConnQuery) Error() error { func (_m *AppConnQuery) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Info") + } + var r0 *types.ResponseInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { @@ -85,6 +97,10 @@ func (_m *AppConnQuery) Info(_a0 context.Context, _a1 *types.RequestInfo) (*type func (_m *AppConnQuery) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 *types.ResponseQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { @@ -107,13 +123,12 @@ func (_m *AppConnQuery) Query(_a0 context.Context, _a1 *types.RequestQuery) (*ty return r0, r1 } -type mockConstructorTestingTNewAppConnQuery interface { +// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAppConnQuery(t interface { mock.TestingT Cleanup(func()) -} - -// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAppConnQuery(t mockConstructorTestingTNewAppConnQuery) *AppConnQuery { +}) *AppConnQuery { mock := &AppConnQuery{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index cb313d522d5..8a33f8f0629 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -19,6 +19,10 @@ type AppConnSnapshot struct { func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + var r0 *types.ResponseApplySnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { @@ -45,6 +49,10 @@ func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 *types.Re func (_m *AppConnSnapshot) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -59,6 +67,10 @@ func (_m *AppConnSnapshot) Error() error { func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + var r0 *types.ResponseListSnapshots var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { @@ -85,6 +97,10 @@ func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 *types.Request func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + var r0 *types.ResponseLoadSnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { @@ -111,6 +127,10 @@ func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 *types.Req func (_m *AppConnSnapshot) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + var r0 *types.ResponseOfferSnapshot var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { @@ -133,13 +153,12 @@ func (_m *AppConnSnapshot) OfferSnapshot(_a0 context.Context, _a1 *types.Request return r0, r1 } -type mockConstructorTestingTNewAppConnSnapshot interface { +// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAppConnSnapshot(t interface { mock.TestingT Cleanup(func()) -} - -// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAppConnSnapshot(t mockConstructorTestingTNewAppConnSnapshot) *AppConnSnapshot { +}) *AppConnSnapshot { mock := &AppConnSnapshot{} mock.Mock.Test(t) diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go index 798afe88fb2..20671e2a705 100644 --- a/proxy/mocks/client_creator.go +++ b/proxy/mocks/client_creator.go @@ -16,6 +16,10 @@ type ClientCreator struct { func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewABCIClient") + } + var r0 abcicli.Client var r1 error if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { @@ -38,13 +42,12 @@ func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { return r0, r1 } -type mockConstructorTestingTNewClientCreator interface { +// NewClientCreator creates a new instance of ClientCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientCreator(t interface { mock.TestingT Cleanup(func()) -} - -// NewClientCreator creates a new instance of ClientCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClientCreator(t mockConstructorTestingTNewClientCreator) *ClientCreator { +}) *ClientCreator { mock := &ClientCreator{} mock.Mock.Test(t) diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 4c8d8eed656..12a96e2537c 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -5,10 +5,12 @@ import ( "context" "fmt" "log" + "strings" "github.com/cometbft/cometbft/abci/example/kvstore" rpchttp "github.com/cometbft/cometbft/rpc/client/http" ctypes "github.com/cometbft/cometbft/rpc/core/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" rpctest "github.com/cometbft/cometbft/rpc/test" ) @@ -135,3 +137,55 @@ func ExampleHTTP_batching() { // firstName = satoshi // lastName = nakamoto } + +// Test the maximum batch request size middleware. 
+func ExampleHTTP_maxBatchSize() { + // Start a CometBFT node (and kvstore) in the background to test against + app := kvstore.NewInMemoryApplication() + node := rpctest.StartTendermint(app, rpctest.RecreateConfig, rpctest.SuppressStdout, rpctest.MaxReqBatchSize) + + // Lower max_request_batch_size so the five requests below exceed it + node.Config().RPC.MaxRequestBatchSize = 2 + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c, err := rpchttp.New(rpcAddr, "/websocket") + if err != nil { + log.Fatal(err) + } + + defer rpctest.StopTendermint(node) + + // Create a new batch + batch := c.NewBatch() + + for i := 1; i <= 5; i++ { + if _, err := batch.Health(context.Background()); err != nil { + log.Fatal(err) + } + } + + // Send the requests + results, err := batch.Send(context.Background()) + if err != nil { + log.Fatal(err) + } + + // Each result in the returned list is the deserialized result of each + // respective health response + for _, result := range results { + rpcError, ok := result.(*types.RPCError) + if !ok { + log.Fatal("invalid result type") + } + if !strings.Contains(rpcError.Data, "batch request exceeds maximum") { + fmt.Println("Error message does not contain 'Max Request Batch Exceeded'") + } else { + // The server returned the expected max-batch-size error + fmt.Println("Max Request Batch Exceeded") + } + } + + // Output: + // Max Request Batch Exceeded +} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 30a31f77b6f..f9ccaeb5a4c 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -235,7 +235,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, @@ -505,7 +506,6 @@ func (c *baseRPCClient) TxSearch( perPage *int, orderBy string, ) (*ctypes.ResultTxSearch, error) { - result := new(ctypes.ResultTxSearch) params := map[string]interface{}{ "query": query, @@ -534,7 +534,6 @@ func (c *baseRPCClient) BlockSearch( page, perPage *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { - result := new(ctypes.ResultBlockSearch) params := map[string]interface{}{ "query": query, @@ -654,9 +653,9 @@ func (w *WSEvents) OnStop() { // Channel is never closed to prevent clients from seeing an erroneous event. // // It returns an error if WSEvents is not running. -func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - +func (w *WSEvents) Subscribe(ctx context.Context, _, query string, + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { if !w.IsRunning() { return nil, errNotRunning } @@ -684,7 +683,7 @@ func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, // subscriber from query. // // It returns an error if WSEvents is not running. -func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { +func (w *WSEvents) Unsubscribe(ctx context.Context, _, query string) error { if !w.IsRunning() { return errNotRunning } @@ -707,7 +706,7 @@ func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // given subscriber from all the queries. // // It returns an error if WSEvents is not running.
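[Editor's aside] The batch-size limit exercised by ExampleHTTP_maxBatchSize above is an ordinary RPC config field; the example shrinks it to 2 so that its batch of five health requests trips the guard. A hedged sketch of tuning it (the zero-disables behavior is an assumption based on the config docs, not something shown in this diff):

cfg := rpctest.GetConfig()
cfg.RPC.MaxRequestBatchSize = 10 // cap JSON-RPC batches at 10 requests
// cfg.RPC.MaxRequestBatchSize = 0 // assumed: zero disables the limit entirely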
-func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { +func (w *WSEvents) UnsubscribeAll(ctx context.Context, _ string) error { if !w.IsRunning() { return errNotRunning } diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 39f98b6f060..7115af1deb8 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -65,11 +65,11 @@ func (c *Local) SetLogger(l log.Logger) { c.Logger = l } -func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c *Local) Status(context.Context) (*ctypes.ResultStatus, error) { return c.env.Status(c.ctx) } -func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Local) ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) { return c.env.ABCIInfo(c.ctx) } @@ -78,63 +78,64 @@ func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) } func (c *Local) ABCIQueryWithOptions( - ctx context.Context, + _ context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(c.ctx, tx) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) UnconfirmedTxs(_ context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } -func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.NumUnconfirmedTxs(c.ctx) } -func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Local) CheckTx(_ context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(c.ctx, tx) } -func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Local) NetInfo(context.Context) (*ctypes.ResultNetInfo, error) { return c.env.NetInfo(c.ctx) } -func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Local) DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(c.ctx) } -func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Local) ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) { return c.env.GetConsensusState(c.ctx) } -func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Local) ConsensusParams(_ context.Context, height *int64) 
(*ctypes.ResultConsensusParams, error) { return c.env.ConsensusParams(c.ctx, height) } -func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Local) Health(context.Context) (*ctypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c *Local) DialSeeds(_ context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return c.env.UnsafeDialSeeds(c.ctx, seeds) } func (c *Local) DialPeers( - ctx context.Context, + _ context.Context, peers []string, persistent, unconditional, @@ -143,47 +144,47 @@ func (c *Local) DialPeers( return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) } -func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Local) BlockchainInfo(_ context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Local) Genesis(context.Context) (*ctypes.ResultGenesis, error) { return c.env.Genesis(c.ctx) } -func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Local) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { return c.env.GenesisChunked(c.ctx, id) } -func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Local) Block(_ context.Context, height *int64) (*ctypes.ResultBlock, error) { return c.env.Block(c.ctx, height) } -func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c *Local) BlockByHash(_ context.Context, hash []byte) (*ctypes.ResultBlock, error) { return c.env.BlockByHash(c.ctx, hash) } -func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Local) BlockResults(_ context.Context, height *int64) (*ctypes.ResultBlockResults, error) { return c.env.BlockResults(c.ctx, height) } -func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { +func (c *Local) Header(_ context.Context, height *int64) (*ctypes.ResultHeader, error) { return c.env.Header(c.ctx, height) } -func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { +func (c *Local) HeaderByHash(_ context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { return c.env.HeaderByHash(c.ctx, hash) } -func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Local) Commit(_ context.Context, height *int64) (*ctypes.ResultCommit, error) { return c.env.Commit(c.ctx, height) } -func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c *Local) Validators(_ context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return c.env.Validators(c.ctx, height, page, perPage) } -func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *Local) Tx(_ context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { return c.env.Tx(c.ctx, hash, prove) } @@ -207,7 +208,7 @@ func (c *Local) BlockSearch( return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) } -func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) 
(*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(_ context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(c.ctx, ev) } @@ -215,7 +216,8 @@ func (c *Local) Subscribe( ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { q, err := cmtquery.New(query) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) @@ -246,7 +248,8 @@ func (c *Local) eventsRoutine( sub types.Subscription, subscriber string, q cmtpubsub.Query, - outc chan<- ctypes.ResultEvent) { + outc chan<- ctypes.ResultEvent, +) { for { select { case msg := <-sub.Out(): diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index 290ebf35e31..7d7692a7417 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -20,7 +20,7 @@ func TestMain(m *testing.M) { app := kvstore.NewPersistentApplication(dir) // If testing block event generation - // app.SetGenBlockEvents() needs to be called here + // app.SetGenBlockEvents() // needs to be called here (see TestBlockSearch in rpc_test.go) node = rpctest.StartTendermint(app) code := m.Run() diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 447ae9c694e..0607954251f 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -83,11 +83,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { return nil, c.Error } -func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c Client) Status(context.Context) (*ctypes.ResultStatus, error) { return c.env.Status(&rpctypes.Context{}) } -func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c Client) ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) { return c.env.ABCIInfo(&rpctypes.Context{}) } @@ -96,55 +96,56 @@ func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) } func (c Client) ABCIQueryWithOptions( - ctx context.Context, + _ context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } -func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c Client) CheckTx(_ context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(&rpctypes.Context{}, tx) } -func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c Client) NetInfo(_ context.Context) (*ctypes.ResultNetInfo, 
error) { return c.env.NetInfo(&rpctypes.Context{}) } -func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c Client) ConsensusState(_ context.Context) (*ctypes.ResultConsensusState, error) { return c.env.GetConsensusState(&rpctypes.Context{}) } -func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c Client) DumpConsensusState(_ context.Context) (*ctypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(&rpctypes.Context{}) } -func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c Client) ConsensusParams(_ context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { return c.env.ConsensusParams(&rpctypes.Context{}, height) } -func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c Client) Health(_ context.Context) (*ctypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c Client) DialSeeds(_ context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } func (c Client) DialPeers( - ctx context.Context, + _ context.Context, peers []string, persistent, unconditional, @@ -153,30 +154,30 @@ func (c Client) DialPeers( return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) } -func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(_ context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } -func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c Client) Genesis(context.Context) (*ctypes.ResultGenesis, error) { return c.env.Genesis(&rpctypes.Context{}) } -func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c Client) Block(_ context.Context, height *int64) (*ctypes.ResultBlock, error) { return c.env.Block(&rpctypes.Context{}, height) } -func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c Client) BlockByHash(_ context.Context, hash []byte) (*ctypes.ResultBlock, error) { return c.env.BlockByHash(&rpctypes.Context{}, hash) } -func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c Client) Commit(_ context.Context, height *int64) (*ctypes.ResultCommit, error) { return c.env.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(_ context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c Client) BroadcastEvidence(_ context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index a68bcf0d7aa..69b60674778 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -17,7 +17,7 @@ var ( _ 
client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (m *StatusMock) Status(context.Context) (*ctypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f9cc2f3d19c..8832ef040c1 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -529,11 +529,15 @@ func TestBlockSearch(t *testing.T) { require.NoError(t, err) } require.NoError(t, client.WaitForHeight(c, 5, nil)) - // This cannot test match_events as it calls the client BlockSearch function directly - // It is the RPC request handler that processes the match_event - result, err := c.BlockSearch(context.Background(), "begin_event.foo = 100 AND begin_event.bar = 300", nil, nil, "asc") + result, err := c.BlockSearch(context.Background(), "begin_event.foo = 100", nil, nil, "asc") require.NoError(t, err) blockCount := len(result.Blocks) + // if we generate block events within the test (by uncommenting + // the code in line main_test.go:L23) then we expect len(result.Blocks) + // to be at least 5 + // require.GreaterOrEqual(t, blockCount, 5) + + // otherwise it is 0 require.Equal(t, blockCount, 0) } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 3152c080df7..f925d0fb649 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -11,9 +11,9 @@ import ( ) // ABCIQuery queries the application for some information. -// More: https://docs.cometbft.com/main/rpc/#/ABCI/abci_query +// More: https://docs.cometbft.com/v0.38.x/rpc/#/ABCI/abci_query func (env *Environment) ABCIQuery( - ctx *rpctypes.Context, + _ *rpctypes.Context, path string, data bytes.HexBytes, height int64, @@ -33,8 +33,8 @@ func (env *Environment) ABCIQuery( } // ABCIInfo gets some info about the application. -// More: https://docs.cometbft.com/main/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/ABCI/abci_info +func (env *Environment) ABCIInfo(_ *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.Info(context.TODO(), proxy.RequestInfo) if err != nil { return nil, err diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ce8b1871b5b..c7dc2e8e5ee 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -23,11 +23,11 @@ import ( // At most 20 items will be returned. Block headers are returned in descending // order (highest first). // -// More: https://docs.cometbft.com/main/rpc/#/Info/blockchain +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( - ctx *rpctypes.Context, - minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - + _ *rpctypes.Context, + minHeight, maxHeight int64, +) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 var err error minHeight, maxHeight, err = filterMinMax( @@ -49,7 +49,8 @@ func (env *Environment) BlockchainInfo( return &ctypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), - BlockMetas: blockMetas}, nil + BlockMetas: blockMetas, + }, nil } // error if either min or max are negative or min > max @@ -87,8 +88,8 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Header gets block header at a given height. // If no height is provided, it will fetch the latest header. 
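[Editor's aside] As the comments in TestBlockSearch and TestMain note, the zero-result assertion above holds only because the kvstore application does not emit block events by default; a sketch of the opt-in path those comments refer to:

// Illustrative: enable block-event generation before starting the node,
// so queries like "begin_event.foo = 100" match blocks in BlockSearch.
app := kvstore.NewPersistentApplication(dir)
app.SetGenBlockEvents()
node = rpctest.StartTendermint(app)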
-// More: https://docs.cometbft.com/main/rpc/#/Info/header -func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/header +func (env *Environment) Header(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -103,8 +104,8 @@ func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes } // HeaderByHash gets header by hash. -// More: https://docs.cometbft.com/main/rpc/#/Info/header_by_hash -func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/header_by_hash +func (env *Environment) HeaderByHash(_ *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. @@ -119,8 +120,8 @@ func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) // Block gets block at a given height. // If no height is provided, it will fetch the latest block. -// More: https://docs.cometbft.com/main/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/block +func (env *Environment) Block(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -135,8 +136,8 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. } // BlockByHash gets block by hash. -// More: https://docs.cometbft.com/main/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/block_by_hash +func (env *Environment) BlockByHash(_ *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { block := env.BlockStore.LoadBlockByHash(hash) if block == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil @@ -148,8 +149,8 @@ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. -// More: https://docs.cometbft.com/main/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/commit +func (env *Environment) Commit(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -179,8 +180,8 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // Results are for the height of the block containing the txs. 
// Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] -// More: https://docs.cometbft.com/main/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/block_results +func (env *Environment) BlockResults(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -188,6 +189,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (* results, err := env.StateStore.LoadFinalizeBlockResponse(height) if err != nil { + env.Logger.Error("failed to LoadFinalizeBlockResponse", "err", err) return nil, err } @@ -197,6 +199,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (* FinalizeBlockEvents: results.Events, ValidatorUpdates: results.ValidatorUpdates, ConsensusParamUpdates: results.ConsensusParamUpdates, + AppHash: results.AppHash, }, nil } @@ -208,7 +211,6 @@ func (env *Environment) BlockSearch( pagePtr, perPagePtr *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { - // skip if block indexing is disabled if _, ok := env.BlockIndexer.(*blockidxnull.BlockerIndexer); ok { return nil, errors.New("block indexing is disabled") diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 88bf57ceb4d..68c0a1facd2 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -74,6 +74,7 @@ func TestBlockResults(t *testing.T) { {Code: 0, Data: []byte{0x02}, Log: "ok"}, {Code: 1, Log: "not ok"}, }, + AppHash: make([]byte, 1), } env := &Environment{} @@ -101,6 +102,7 @@ func TestBlockResults(t *testing.T) { FinalizeBlockEvents: results.Events, ValidatorUpdates: results.ValidatorUpdates, ConsensusParamUpdates: results.ConsensusParamUpdates, + AppHash: make([]byte, 1), }}, } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index b39c090ff3e..6f1a52b168a 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -14,12 +14,12 @@ import ( // validators are sorted by their voting power - this is the canonical order // for the validators in the set as used in computing their Merkle root. // -// More: https://docs.cometbft.com/main/rpc/#/Info/validators +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/validators func (env *Environment) Validators( - ctx *rpctypes.Context, + _ *rpctypes.Context, heightPtr *int64, - pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { - + pagePtr, perPagePtr *int, +) (*ctypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) if err != nil { @@ -46,13 +46,14 @@ func (env *Environment) Validators( BlockHeight: height, Validators: v, Count: len(v), - Total: totalCount}, nil + Total: totalCount, + }, nil } // DumpConsensusState dumps consensus state. // UNSTABLE -// More: https://docs.cometbft.com/main/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/dump_consensus_state +func (env *Environment) DumpConsensusState(*rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. 
peers := env.P2PPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) @@ -79,13 +80,14 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul } return &ctypes.ResultDumpConsensusState{ RoundState: roundState, - Peers: peerStates}, nil + Peers: peerStates, + }, nil } // ConsensusState returns a concise summary of the consensus state. // UNSTABLE -// More: https://docs.cometbft.com/main/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/consensus_state +func (env *Environment) GetConsensusState(*rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{RoundState: bz}, err @@ -93,11 +95,11 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. -// More: https://docs.cometbft.com/main/rpc/#/Info/consensus_params +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/consensus_params func (env *Environment) ConsensusParams( - ctx *rpctypes.Context, - heightPtr *int64) (*ctypes.ResultConsensusParams, error) { - + _ *rpctypes.Context, + heightPtr *int64, +) (*ctypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) @@ -111,5 +113,6 @@ func (env *Environment) ConsensusParams( } return &ctypes.ResultConsensusParams{ BlockHeight: height, - ConsensusParams: consensusParams}, nil + ConsensusParams: consensusParams, + }, nil } diff --git a/rpc/core/dev.go b/rpc/core/dev.go index 90f035531f8..389c96ee03c 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -6,7 +6,7 @@ import ( ) // UnsafeFlushMempool removes all transactions from the mempool. -func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(*rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &ctypes.ResultUnsafeFlushMempool{}, nil } diff --git a/rpc/core/doc.go b/rpc/core/doc.go index dbd76ac69ca..3032d1cc1dd 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -2,7 +2,7 @@ Package core defines the CometBFT RPC endpoints. CometBFT ships with its own JSONRPC library - -https://github.com/cometbft/cometbft/tree/main/rpc/jsonrpc. +https://github.com/cometbft/cometbft/tree/v0.38.x/rpc/jsonrpc. ## Get the list diff --git a/rpc/core/events.go b/rpc/core/events.go index a4da11a1b73..b130f38f0ac 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -19,7 +19,7 @@ const ( ) // Subscribe for events via WebSocket. -// More: https://docs.cometbft.com/main/rpc/#/Websocket/subscribe +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/subscribe func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() @@ -102,7 +102,7 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. } // Unsubscribe from events via WebSocket. 
-// More: https://docs.cometbft.com/main/rpc/#/Websocket/unsubscribe +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/unsubscribe func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) @@ -118,7 +118,7 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype } // UnsubscribeAll from all events via WebSocket. -// More: https://docs.cometbft.com/main/rpc/#/Websocket/unsubscribe_all +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/unsubscribe_all func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 38bb862562e..b742f985c17 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -10,11 +10,11 @@ import ( ) // BroadcastEvidence broadcasts evidence of the misbehavior. -// More: https://docs.cometbft.com/main/rpc/#/Evidence/broadcast_evidence +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( - ctx *rpctypes.Context, - ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - + _ *rpctypes.Context, + ev types.Evidence, +) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { return nil, errors.New("no evidence was provided") } diff --git a/rpc/core/health.go b/rpc/core/health.go index 322e6af7aa9..e1b0017c850 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -7,7 +7,7 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. -// More: https://docs.cometbft.com/main/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/health +func (env *Environment) Health(*rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 17e2c323910..93442dfe765 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -18,10 +18,9 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor transaction results. -// More: https://docs.cometbft.com/main/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Tx/broadcast_tx_async +func (env *Environment) BroadcastTxAsync(_ *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) - if err != nil { return nil, err } @@ -30,7 +29,7 @@ func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*c // BroadcastTxSync returns with the response from CheckTx. Does not wait for // the transaction result. 
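[Editor's aside] Taken together, the three mempool broadcast endpoints documented in this file differ only in how long they block; a compact comparison using any client that implements them (tx is an illustrative raw transaction):

tx := types.Tx("key=value")
_, _ = c.BroadcastTxAsync(ctx, tx)  // returns before CheckTx runs
_, _ = c.BroadcastTxSync(ctx, tx)   // returns with the CheckTx response
_, _ = c.BroadcastTxCommit(ctx, tx) // returns once the tx is in a block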
-// More: https://docs.cometbft.com/main/rpc/#/Tx/broadcast_tx_sync +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Tx/broadcast_tx_sync func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx(tx, func(res *abci.ResponseCheckTx) { @@ -38,7 +37,6 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct case <-ctx.Context().Done(): case resCh <- res: } - }, mempl.TxInfo{}) if err != nil { return nil, err @@ -59,7 +57,7 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct } // BroadcastTxCommit returns with the responses from CheckTx and ExecTxResult. -// More: https://docs.cometbft.com/main/rpc/#/Tx/broadcast_tx_commit +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Tx/broadcast_tx_commit func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := ctx.RemoteAddr() @@ -147,8 +145,8 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. -// More: https://docs.cometbft.com/main/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/unconfirmed_txs +func (env *Environment) UnconfirmedTxs(_ *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit := env.validatePerPage(limitPtr) @@ -157,22 +155,24 @@ func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*c Count: len(txs), Total: env.Mempool.Size(), TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil + Txs: txs, + }, nil } // NumUnconfirmedTxs gets number of unconfirmed transactions. -// More: https://docs.cometbft.com/main/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/num_unconfirmed_txs +func (env *Environment) NumUnconfirmedTxs(*rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes()}, nil + TotalBytes: env.Mempool.SizeBytes(), + }, nil } // CheckTx checks the transaction without executing it. The transaction won't // be added to the mempool either. -// More: https://docs.cometbft.com/main/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Tx/check_tx +func (env *Environment) CheckTx(_ *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { res, err := env.ProxyAppMempool.CheckTx(context.TODO(), &abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err diff --git a/rpc/core/net.go b/rpc/core/net.go index 0a619910e61..a79e2eb76aa 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -11,8 +11,8 @@ import ( ) // NetInfo returns network info. 
-// More: https://docs.cometbft.com/main/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/net_info +func (env *Environment) NetInfo(*rpctypes.Context) (*ctypes.ResultNetInfo, error) { peersList := env.P2PPeers.Peers().List() peers := make([]ctypes.Peer, 0, len(peersList)) for _, peer := range peersList { @@ -39,7 +39,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e } // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (env *Environment) UnsafeDialSeeds(_ *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided") } @@ -53,10 +53,10 @@ func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) ( // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), // optionally making them persistent. func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, + _ *rpctypes.Context, peers []string, - persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { - + persistent, unconditional, private bool, +) (*ctypes.ResultDialPeers, error) { if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("no peers provided") } @@ -95,8 +95,8 @@ func (env *Environment) UnsafeDialPeers( } // Genesis returns genesis file. -// More: https://docs.cometbft.com/main/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/genesis +func (env *Environment) Genesis(*rpctypes.Context) (*ctypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -104,7 +104,7 @@ func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, e return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(_ *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go index 29feccecb7a..5791621129e 100644 --- a/rpc/core/net_test.go +++ b/rpc/core/net_test.go @@ -13,7 +13,7 @@ import ( ) func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) err := sw.Start() require.NoError(t, err) @@ -48,7 +48,7 @@ func TestUnsafeDialSeeds(t *testing.T) { } func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetAddrBook(&p2p.AddrBookMock{ Addrs: make(map[string]struct{}), diff --git a/rpc/core/status.go b/rpc/core/status.go index 29cba7cbeed..5e3d6d1892e 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -12,8 +12,8 @@ import ( // Status returns CometBFT status including node info, pubkey, latest block // hash, app hash, block 
height and time. -// More: https://docs.cometbft.com/main/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/status +func (env *Environment) Status(*rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash cmtbytes.HexBytes diff --git a/rpc/core/tx.go b/rpc/core/tx.go index d84ed3a955a..fdd38e327bc 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -16,8 +16,8 @@ import ( // Tx allows you to query the transaction results. `nil` could mean the // transaction is in the mempool, invalidated, or was not sent in the first // place. -// More: https://docs.cometbft.com/main/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/tx +func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, fmt.Errorf("transaction indexing is disabled") @@ -35,7 +35,9 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*cty var proof types.TxProof if prove { block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) + if block != nil { + proof = block.Data.Txs.Proof(int(r.Index)) + } } return &ctypes.ResultTx{ @@ -50,7 +52,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*cty // TxSearch allows you to query for multiple transactions results. It returns a // list of transactions (maximum ?per_page entries) and the total count. -// More: https://docs.cometbft.com/main/rpc/#/Info/tx_search +// More: https://docs.cometbft.com/v0.38.x/rpc/#/Info/tx_search func (env *Environment) TxSearch( ctx *rpctypes.Context, query string, @@ -58,7 +60,6 @@ func (env *Environment) TxSearch( pagePtr, perPagePtr *int, orderBy string, ) (*ctypes.ResultTxSearch, error) { - // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, errors.New("transaction indexing is disabled") @@ -115,7 +116,9 @@ func (env *Environment) TxSearch( var proof types.TxProof if prove { block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) + if block != nil { + proof = block.Data.Txs.Proof(int(r.Index)) + } } apiResults = append(apiResults, &ctypes.ResultTx{ diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 08031991dc4..d237953b1a6 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -12,12 +12,12 @@ type broadcastAPI struct { env *core.Environment } -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { +func (bapi *broadcastAPI) Ping(context.Context, *RequestPing) (*ResponsePing, error) { // kvstore so we can check if the server is up return &ResponsePing{}, nil } -func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { +func (bapi *broadcastAPI) BroadcastTx(_ context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { // NOTE: there's no way to get client's remote address // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 3856d3ecca1..b2105e84120 100644 --- a/rpc/grpc/client_server.go +++ 
b/rpc/grpc/client_server.go @@ -11,6 +11,8 @@ import ( ) // Config is an gRPC server configuration. +// +// Deprecated: A new gRPC API will be introduced after v0.38. type Config struct { MaxOpenConnections int } @@ -18,6 +20,8 @@ type Config struct { // StartGRPCServer starts a new gRPC BroadcastAPIServer using the given // net.Listener. // NOTE: This function blocks - you may want to call it in a go-routine. +// +// Deprecated: A new gRPC API will be introduced after v0.38. func StartGRPCServer(env *core.Environment, ln net.Listener) error { grpcServer := grpc.NewServer() RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) @@ -26,8 +30,9 @@ func StartGRPCServer(env *core.Environment, ln net.Listener) error { // StartGRPCClient dials the gRPC server using protoAddr and returns a new // BroadcastAPIClient. +// +// Deprecated: A new gRPC API will be introduced after v0.38. func StartGRPCClient(protoAddr string) BroadcastAPIClient { - //nolint: staticcheck // SA1019 Existing use of deprecated but supported dial option. conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { panic(err) @@ -35,6 +40,6 @@ func StartGRPCClient(protoAddr string) BroadcastAPIClient { return NewBroadcastAPIClient(conn) } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index 393c7394764..c27afb4e246 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -329,6 +329,7 @@ func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.rpc.grpc.BroadcastAPI", HandlerType: (*BroadcastAPIServer)(nil), diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go index 2ae917d97d1..d8af529febf 100644 --- a/rpc/jsonrpc/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -38,6 +38,19 @@ func unmarshalResponseBytes( return result, nil } +// Separate the unmarshalling actions using different functions to improve readability and maintainability. +func unmarshalIndividualResponse(responseBytes []byte) (types.RPCResponse, error) { + var singleResponse types.RPCResponse + err := json.Unmarshal(responseBytes, &singleResponse) + return singleResponse, err +} + +func unmarshalMultipleResponses(responseBytes []byte) ([]types.RPCResponse, error) { + var responses []types.RPCResponse + err := json.Unmarshal(responseBytes, &responses) + return responses, err +} + func unmarshalResponseBytesArray( responseBytes []byte, expectedIDs []types.JSONRPCIntID, @@ -48,41 +61,56 @@ func unmarshalResponseBytesArray( responses []types.RPCResponse ) - if err := json.Unmarshal(responseBytes, &responses); err != nil { - return nil, fmt.Errorf("error unmarshalling: %w", err) - } - - // No response error checking here as there may be a mixture of successful - // and unsuccessful responses. + // Try to unmarshal as multiple responses + responses, err := unmarshalMultipleResponses(responseBytes) + // if err == nil it could unmarshal in multiple responses + if err == nil { + // No response error checking here as there may be a mixture of successful + // and unsuccessful responses. 
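// [Editor's aside] The helper split above mirrors the two wire shapes a
// JSON-RPC server can produce: a batch reply is a JSON array of response
// objects, while some failures (such as an oversized batch) arrive as one
// top-level response carrying an error. A minimal sketch of both cases
// (payloads illustrative):

// Batch reply: tried first via unmarshalMultipleResponses.
batch := []byte(`[{"jsonrpc":"2.0","id":0,"result":{}},{"jsonrpc":"2.0","id":1,"result":{}}]`)
var many []types.RPCResponse
_ = json.Unmarshal(batch, &many) // succeeds: proceed to the ID checks below

// Single error reply: falls back to unmarshalIndividualResponse.
single := []byte(`{"jsonrpc":"2.0","id":-1,"error":{"code":-32600,"message":"Invalid Request"}}`)
var one types.RPCResponse
_ = json.Unmarshal(single, &one) // one.Error != nil, surfaced to the caller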
+ + if len(results) != len(responses) { + return nil, fmt.Errorf( + "expected %d result objects into which to inject responses, but got %d", + len(responses), + len(results), + ) + } - if len(results) != len(responses) { - return nil, fmt.Errorf( - "expected %d result objects into which to inject responses, but got %d", - len(responses), - len(results), - ) - } + // Intersect IDs from responses with expectedIDs. + ids := make([]types.JSONRPCIntID, len(responses)) + var ok bool + for i, resp := range responses { + ids[i], ok = resp.ID.(types.JSONRPCIntID) + if !ok { + return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) + } + } + if err := validateResponseIDs(ids, expectedIDs); err != nil { + return nil, fmt.Errorf("wrong IDs: %w", err) + } - // Intersect IDs from responses with expectedIDs. - ids := make([]types.JSONRPCIntID, len(responses)) - var ok bool - for i, resp := range responses { - ids[i], ok = resp.ID.(types.JSONRPCIntID) - if !ok { - return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) + for i := 0; i < len(responses); i++ { + if err := cmtjson.Unmarshal(responses[i].Result, results[i]); err != nil { + return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) + } } + + return results, nil } - if err := validateResponseIDs(ids, expectedIDs); err != nil { - return nil, fmt.Errorf("wrong IDs: %w", err) + // check if it's a single response that should be an error + singleResponse, err := unmarshalIndividualResponse(responseBytes) + if err != nil { + // Here, an error means that even single response unmarshalling failed, + // so return the error. + return nil, fmt.Errorf("error unmarshalling: %w", err) } - - for i := 0; i < len(responses); i++ { - if err := cmtjson.Unmarshal(responses[i].Result, results[i]); err != nil { - return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) - } + singleResult := make([]any, 0) + if singleResponse.Error != nil { + singleResult = append(singleResult, singleResponse.Error) + } else { + singleResult = append(singleResult, singleResponse.Result) } - - return results, nil + return singleResult, nil } func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 0e64242ea08..a41d385cc40 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -9,6 +9,7 @@ import ( "net" "net/http" "net/url" + "regexp" "strings" cmtsync "github.com/cometbft/cometbft/libs/sync" @@ -24,6 +25,8 @@ const ( protoUNIX = "unix" ) +var endsWithPortPattern = regexp.MustCompile(`:[0-9]+$`) + //------------------------------------------------------------- // Parsed URL structure @@ -89,8 +92,19 @@ func (u parsedURL) GetTrimmedHostWithPath() string { // GetDialAddress returns the endpoint to dial for the parsed URL func (u parsedURL) GetDialAddress() string { - // if it's not a unix socket we return the host, example: localhost:443 + // if it's not a unix socket we return the host with port, example: localhost:443 if !u.isUnixSocket { + hasPort := endsWithPortPattern.MatchString(u.Host) + if !hasPort { + // http and ws default to port 80, https and wss default to port 443 + // https://www.rfc-editor.org/rfc/rfc9110#section-4.2 + // https://www.rfc-editor.org/rfc/rfc6455.html#section-3 + if u.Scheme == protoHTTP || u.Scheme == protoWS { + return u.Host + `:80` + } else if u.Scheme == protoHTTPS || u.Scheme == protoWSS { + return u.Host + `:443` + } + } return u.Host } // otherwise we 
return the path of the unix socket, ex /tmp/socket @@ -139,6 +153,8 @@ var _ HTTPClient = (*Client)(nil) var _ Caller = (*Client)(nil) var _ Caller = (*RequestBatch)(nil) +var _ fmt.Stringer = (*Client)(nil) + // New returns a Client pointed at the given address. // An error is returned on invalid remote. The function panics when remote is nil. func New(remote string) (*Client, error) { @@ -214,15 +230,26 @@ func (c *Client) Call( if err != nil { return nil, fmt.Errorf("post failed: %w", err) } - defer httpResponse.Body.Close() responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { - return nil, fmt.Errorf("failed to read response body: %w", err) + return nil, fmt.Errorf("%s. Failed to read response body: %w", getHTTPRespErrPrefix(httpResponse), err) } - return unmarshalResponseBytes(responseBytes, id, result) + res, err := unmarshalResponseBytes(responseBytes, id, result) + if err != nil { + return nil, fmt.Errorf("%s. %w", getHTTPRespErrPrefix(httpResponse), err) + } + return res, nil +} + +func getHTTPRespErrPrefix(resp *http.Response) string { + return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) +} + +func (c *Client) String() string { + return fmt.Sprintf("&Client{user=%v, addr=%v, client=%v, nextReqID=%v}", c.username, c.address, c.client, c.nextReqID) } // NewRequestBatch starts a batch of requests for this client. @@ -399,6 +426,7 @@ func DefaultHTTPClient(remoteAddr string) (*http.Client, error) { // Set to true to prevent GZIP-bomb DoS attacks DisableCompression: true, Dial: dialFn, + Proxy: http.ProxyFromEnvironment, }, } diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 03134dff583..29d31476f1f 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -52,20 +52,34 @@ func Test_parsedURL(t *testing.T) { }, "http endpoint": { + url: "http://example.com", + expectedURL: "http://example.com", + expectedHostWithPath: "example.com", + expectedDialAddress: "example.com:80", + }, + + "http endpoint with port": { + url: "http://example.com:8080", + expectedURL: "http://example.com:8080", + expectedHostWithPath: "example.com:8080", + expectedDialAddress: "example.com:8080", + }, + + "https endpoint": { url: "https://example.com", expectedURL: "https://example.com", expectedHostWithPath: "example.com", - expectedDialAddress: "example.com", + expectedDialAddress: "example.com:443", }, - "http endpoint with port": { + "https endpoint with port": { url: "https://example.com:8080", expectedURL: "https://example.com:8080", expectedHostWithPath: "example.com:8080", expectedDialAddress: "example.com:8080", }, - "http path routed endpoint": { + "https path routed endpoint": { url: "https://example.com:8080/rpc", expectedURL: "https://example.com:8080/rpc", expectedHostWithPath: "example.com:8080/rpc", diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index 77a189eb599..ea14d0229d4 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "encoding/base64" "encoding/json" "fmt" "net" @@ -35,7 +36,10 @@ type WSClient struct { //nolint: maligned Address string // IP:PORT or /path/to/socket Endpoint string // /websocket/url/endpoint - Dialer func(string, string) (net.Conn, error) + Username string + Password string + + Dialer func(string, string) (net.Conn, error) // Single user facing channel to 
read RPCResponses from, closed only when the // client is being stopped. @@ -96,16 +100,25 @@ func NewWS(remoteAddr, endpoint string, options ...func(*WSClient)) (*WSClient, parsedURL.Scheme = protoWS } + // extract username and password from URL if any + username := "" + password := "" + if parsedURL.User.String() != "" { + username = parsedURL.User.Username() + password, _ = parsedURL.User.Password() + } + dialFn, err := makeHTTPDialer(remoteAddr) if err != nil { return nil, err } c := &WSClient{ - Address: parsedURL.GetTrimmedHostWithPath(), - Dialer: dialFn, - Endpoint: endpoint, - PingPongLatencyTimer: metrics.NewTimer(), + Address: parsedURL.GetTrimmedHostWithPath(), + Username: username, + Password: password, + Dialer: dialFn, + Endpoint: endpoint, maxReconnectAttempts: defaultMaxReconnectAttempts, readWait: defaultReadWait, @@ -176,6 +189,7 @@ func (c *WSClient) OnStart() error { } c.ResponsesCh = make(chan types.RPCResponse) + c.PingPongLatencyTimer = metrics.NewTimer() c.send = make(chan types.RPCRequest) // 1 additional error may come from the read/write @@ -267,6 +281,12 @@ func (c *WSClient) dial() error { Proxy: http.ProxyFromEnvironment, } rHeader := http.Header{} + + // Set basic auth header if username and password are provided + if c.Username != "" && c.Password != "" { + rHeader.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(c.Username+":"+c.Password))) + } + conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) //nolint:bodyclose if err != nil { return err @@ -452,6 +472,7 @@ func (c *WSClient) readRoutine() { // ignore error; it will trigger in tests // likely because it's closing an already closed connection // } + c.PingPongLatencyTimer.Stop() c.wg.Done() }() diff --git a/rpc/jsonrpc/doc.go b/rpc/jsonrpc/doc.go index 71140a2ae8a..652ab685aa2 100644 --- a/rpc/jsonrpc/doc.go +++ b/rpc/jsonrpc/doc.go @@ -80,5 +80,5 @@ // // # Examples // -// - [CometBFT](https://github.com/cometbft/cometbft/blob/main/rpc/core/routes.go) +// - [CometBFT](https://github.com/cometbft/cometbft/blob/v0.38.x/rpc/core/routes.go) package jsonrpc diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 093b0491fef..1f12c817a0d 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -71,27 +71,27 @@ var Routes = map[string]*server.RPCFunc{ "echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")), } -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoResult(_ *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoWSResult(_ *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(_ *types.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(_ *types.Context, v []byte) (*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *types.Context, v cmtbytes.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(_ *types.Context, v cmtbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } -func EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) { +func EchoWithDefault(_ *types.Context, v *int) (*ResultEchoWithDefault, error) { 
val := -1 if v != nil { val = *v diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 8d746c7a224..0151d12e51d 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -25,12 +25,12 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusBadRequest, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } - // if its an empty request (like from a browser), just display a list of + // if it's an empty request (like from a browser), just display a list of // functions if len(b) == 0 { writeListOfEndpoints(w, r, funcMap) @@ -48,7 +48,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if err := json.Unmarshal(b, &request); err != nil { res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -122,7 +122,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han wErr = WriteRPCResponseHTTP(w, responses...) } if wErr != nil { - logger.Error("failed to write responses", "res", responses, "err", wErr) + logger.Error("failed to write responses", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 2d29cb4bbcc..9bb7948b5dd 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -195,17 +195,16 @@ func TestRPCNotificationInBatch(t *testing.T) { if tt.expectCount > 1 { t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) continue - } else { - // we were expecting an error here, so let's unmarshal a single response - var response types.RPCResponse - err = json.Unmarshal(blob, &response) - if err != nil { - t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) - continue - } - // have a single-element result - responses = []types.RPCResponse{response} } + // we were expecting an error here, so let's unmarshal a single response + var response types.RPCResponse + err = json.Unmarshal(blob, &response) + if err != nil { + t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) + continue + } + // have a single-element result + responses = []types.RPCResponse{response} } if tt.expectCount != len(responses) { t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", i, tt.expectCount, len(responses), blob) diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 3b7cbfec87c..e1e86c09269 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -3,9 +3,11 @@ package server import ( "bufio" + "bytes" "encoding/json" "errors" "fmt" + "io" "net" "net/http" "os" @@ -32,16 +34,19 @@ type Config struct { MaxBodyBytes int64 // mirrors http.Server#MaxHeaderBytes MaxHeaderBytes int + // maximum number of requests in a batch request + MaxRequestBatchSize int } // DefaultConfig returns a default configuration. 
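The new `MaxRequestBatchSize` field caps how many requests a single JSON-RPC batch may carry; `0` disables the check, and the defaults right below use 10. A usage sketch, assuming the `rpc/jsonrpc/server` and `libs/log` import paths from this repository and an illustrative listener address:

```go
package main

import (
	"net/http"
	"os"

	cmtlog "github.com/cometbft/cometbft/libs/log"
	"github.com/cometbft/cometbft/rpc/jsonrpc/server"
)

func main() {
	cfg := server.DefaultConfig()
	cfg.MaxRequestBatchSize = 2 // batches holding more than 2 requests are rejected

	listener, err := server.Listen("tcp://127.0.0.1:26657", cfg.MaxOpenConnections)
	if err != nil {
		panic(err)
	}
	mux := http.NewServeMux() // JSON-RPC routes would be registered here
	logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout))
	// Serve wraps the handler with PreChecksHandler (introduced in this diff),
	// which enforces both MaxBodyBytes and MaxRequestBatchSize before dispatch.
	if err := server.Serve(listener, mux, logger, cfg); err != nil {
		panic(err)
	}
}
```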
func DefaultConfig() *Config { return &Config{ - MaxOpenConnections: 0, // unlimited - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxOpenConnections: 0, // unlimited + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxRequestBatchSize: 10, // default to max 10 requests per batch } } @@ -53,7 +58,7 @@ func DefaultConfig() *Config { func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { logger.Info("serve", "msg", log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr())) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + Handler: PreChecksHandler(RecoverAndLogHandler(defaultHandler{h: handler}, logger), config), ReadTimeout: config.ReadTimeout, ReadHeaderTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, @@ -64,7 +69,7 @@ func Serve(listener net.Listener, handler http.Handler, logger log.Logger, confi return err } -// Serve creates a http.Server and calls ServeTLS with the given listener, +// ServeTLS creates a http.Server and calls ServeTLS with the given listener, // certFile and keyFile. It wraps handler with RecoverAndLogHandler and a // handler, which limits the max body size to config.MaxBodyBytes. // @@ -79,7 +84,7 @@ func ServeTLS( logger.Info("serve tls", "msg", log.NewLazySprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + Handler: PreChecksHandler(RecoverAndLogHandler(defaultHandler{h: handler}, logger), config), ReadTimeout: config.ReadTimeout, ReadHeaderTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, @@ -188,7 +193,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } else { // Panics can contain anything, attempt to normalize it as an error. @@ -207,7 +212,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler res := types.RPCInternalError(types.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } @@ -246,13 +251,11 @@ func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { return w.ResponseWriter.(http.Hijacker).Hijack() } -type maxBytesHandler struct { +type defaultHandler struct { h http.Handler - n int64 } -func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.Body = http.MaxBytesReader(w, r.Body, h.n) +func (h defaultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.h.ServeHTTP(w, r) } @@ -277,3 +280,50 @@ func Listen(addr string, maxOpenConnections int) (listener net.Listener, err err return listener, nil } + +// Middleware + +// PreChecksHandler is a middleware function that checks the size of batch requests and returns an error +// if it exceeds the maximum configured size. 
It also checks that the request body is not greater than the +// configured maximum request body bytes limit. +func PreChecksHandler(next http.Handler, config *Config) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ensure that the current request body bytes is not greater than the configured maximum request body bytes + r.Body = http.MaxBytesReader(w, r.Body, config.MaxBodyBytes) + + // if maxBatchSize is 0 then don't constrain the number of requests per batch + // It cannot be negative because the config.toml validation requires it to be + // greater than or equal to 0 + if config.MaxRequestBatchSize > 0 { + var requests []types.RPCRequest + var responses []types.RPCResponse + var err error + + data, err := io.ReadAll(r.Body) + if err != nil { + res := types.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err)) + _ = WriteRPCResponseHTTPError(w, http.StatusBadRequest, res) + return + } + + err = json.Unmarshal(data, &requests) + // if there is no error, the body holds multiple requests; check whether the number of requests exceeds + // the maximum configured batch size + if err == nil { + // if the number of requests in the batch exceeds the configured maximum, return an error + if len(requests) > config.MaxRequestBatchSize { + res := types.RPCInvalidRequestError(nil, fmt.Errorf("batch request exceeds maximum (%d) allowed number of requests", config.MaxRequestBatchSize)) + responses = append(responses, res) + _ = WriteRPCResponseHTTP(w, responses...) + return + } + } + + // ensure the request body can be read again by other handlers + r.Body = io.NopCloser(bytes.NewBuffer(data)) + } + + // next handler + next.ServeHTTP(w, r) + }) +} diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 134eff20f03..6381d91d70f 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -27,7 +27,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { res := types.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } @@ -45,7 +45,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -58,7 +58,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit if err != nil { if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.RPCInternalError(dummyID, err)); err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } return @@ -71,7 +71,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit err = WriteRPCResponseHTTP(w, resp) } if err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } } diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index b3cb9cf03c5..74ed3495185 100644 --- a/rpc/jsonrpc/test/main.go +++ 
b/rpc/jsonrpc/test/main.go @@ -15,7 +15,7 @@ var routes = map[string]*rpcserver.RPCFunc{ "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), } -func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { +func HelloWorld(_ *rpctypes.Context, name string, num int) (Result, error) { return Result{fmt.Sprintf("hi %s %d", name, num)}, nil } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index b0705576857..568ff1a8471 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -62,10 +62,10 @@ info: echo '{ "jsonrpc": "2.0","method": "subscribe","id": 0,"params": {"query": "tm.event='"'NewBlock'"'"} }' | websocat -n -t ws://127.0.0.1:26657/websocket - version: "main" + version: "v0.38.x" license: name: Apache 2.0 - url: https://github.com/cometbft/cometbft/blob/main/LICENSE + url: https://github.com/cometbft/cometbft/blob/v0.38.x/LICENSE servers: - url: https://rpc.cosmos.directory/cosmoshub description: Interact with the CometBFT RPC from a public node in the Cosmos registry @@ -92,7 +92,7 @@ paths: description: | If you want to be sure that the transaction is included in a block, you can subscribe for the result using JSONRPC via a websocket. See - https://docs.cometbft.com/main/core/subscription.html + https://docs.cometbft.com/v0.38.x/core/subscription.html If you haven't received anything after a couple of blocks, resend it. If the same happens again, send it to some other node. A few reasons why it could happen: @@ -103,7 +103,7 @@ paths: (https://github.com/tendermint/tendermint/issues/3322) - Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + Please refer to [formatting/encoding rules](https://docs.cometbft.com/v0.38.x/core/using-cometbft.html#formatting) for additional details parameters: @@ -136,7 +136,7 @@ paths: description: | If you want to be sure that the transaction is included in a block, you can subscribe for the result using JSONRPC via a websocket. See - https://docs.cometbft.com/main/core/subscription.html + https://docs.cometbft.com/v0.38.x/core/subscription.html If you haven't received anything after a couple of blocks, resend it. If the same happens again, send it to some other node. A few reasons why it could happen: @@ -147,7 +147,7 @@ paths: (https://github.com/tendermint/tendermint/issues/3322) 3. node can be offline - Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + Please refer to [formatting/encoding rules](https://docs.cometbft.com/v0.38.x/core/using-cometbft.html#formatting) for additional details parameters: @@ -181,7 +181,7 @@ paths: IMPORTANT: use only for testing and development. In production, use BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction result using JSONRPC via a websocket. See - https://docs.cometbft.com/main/core/subscription.html + https://docs.cometbft.com/v0.38.x/core/subscription.html CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout waiting for tx to commit. @@ -189,7 +189,7 @@ paths: If CheckTx or DeliverTx fail, no error will be returned, but the returned result will contain a non-OK ABCI code. 
- Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + Please refer to [formatting/encoding rules](https://docs.cometbft.com/v0.38.x/core/using-cometbft.html#formatting) for additional details parameters: @@ -222,7 +222,7 @@ paths: description: | The transaction won't be added to the mempool. - Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + Please refer to [formatting/encoding rules](https://docs.cometbft.com/v0.38.x/core/using-cometbft.html#formatting) for additional details Upon success, the `Cache-Control` header will be set with the default @@ -882,7 +882,7 @@ paths: required: true schema: type: string - example: "tx.height=1000" + example: '"tx.height=1000"' - in: query name: prove description: Include proofs of the transactions inclusion in the block @@ -932,9 +932,9 @@ paths: $ref: "#/components/schemas/ErrorResponse" /block_search: get: - summary: Search for blocks by BeginBlock and EndBlock events + summary: Search for blocks by FinalizeBlock events description: | - Search for blocks by BeginBlock and EndBlock events. + Search for blocks by FinalizeBlock events. See /subscribe for the query syntax. operationId: block_search @@ -945,7 +945,7 @@ paths: required: true schema: type: string - example: "block.height > 1000 AND valset.changed > 0" + example: '"block.height > 1000"' - in: query name: page description: "Page number (1-based)" @@ -1061,7 +1061,7 @@ paths: required: true schema: type: string - example: "/a/b/c" + example: '"/a/b/c"' - in: query name: data description: Data @@ -1580,21 +1580,7 @@ components: codespace: type: string example: "ibc" - begin_block_events: - type: array - nullable: true - items: - type: object - properties: - type: - type: string - example: "app" - attributes: - type: array - nullable: false - items: - $ref: "#/components/schemas/Event" - end_block_events: + finalize_block_events: type: array nullable: true items: @@ -2430,8 +2416,10 @@ components: response: required: - "data" - - "app_version" - "version" + - "app_version" + - "last_block_height" + - "last_block_app_hash" properties: data: type: string @@ -2440,8 +2428,14 @@ components: type: string example: "0.16.1" app_version: + type: string + example: "1" + last_block_height: type: string example: "1314126" + last_block_app_hash: + type: string + example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" type: object type: object @@ -2808,10 +2802,10 @@ components: properties: key: type: string - example: "YWN0aW9u" + example: "action" value: type: string - example: "c2VuZA==" + example: "send" index: type: boolean example: false diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 1103edffac7..1e67f7b4993 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -26,15 +26,18 @@ import ( // Options helps with specifying some parameters for our RPC testing for greater // control. 
type Options struct { - suppressStdout bool - recreateConfig bool + suppressStdout bool + recreateConfig bool + maxReqBatchSize int } -var globalConfig *cfg.Config -var defaultOptions = Options{ - suppressStdout: false, - recreateConfig: false, -} +var ( + globalConfig *cfg.Config + defaultOptions = Options{ + suppressStdout: false, + recreateConfig: false, + } +) func waitForRPC() { laddr := GetConfig().RPC.ListenAddress @@ -113,6 +116,7 @@ func GetConfig(forceCreate ...bool) *cfg.Config { func GetGRPCClient() core_grpc.BroadcastAPIClient { grpcAddr := globalConfig.RPC.GRPCListenAddress + //nolint:staticcheck // SA1019: core_grpc.StartGRPCClient is deprecated: A new gRPC API will be introduced after v0.38. return core_grpc.StartGRPCClient(grpcAddr) } @@ -160,6 +164,9 @@ func NewTendermint(app abci.Application, opts *Options) *nm.Node { logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) logger = log.NewFilter(logger, log.AllowError()) } + if opts.maxReqBatchSize > 0 { + config.RPC.MaxRequestBatchSize = opts.maxReqBatchSize + } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) @@ -190,3 +197,8 @@ func SuppressStdout(o *Options) { func RecreateConfig(o *Options) { o.recreateConfig = true } + +// MaxReqBatchSize is an option to limit the maximum number of requests per batch. +func MaxReqBatchSize(o *Options) { + o.maxReqBatchSize = 2 +} diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go index 1da45cb83f7..b8ce0091c0c 100644 --- a/scripts/metricsgen/metricsgen.go +++ b/scripts/metricsgen/metricsgen.go @@ -145,6 +145,7 @@ func main() { log.Fatalf("Generating code: %v", err) } } + func ignoreTestFiles(f fs.FileInfo) bool { return !strings.Contains(f.Name(), "_test.go") } @@ -167,7 +168,7 @@ func ParseMetricsDir(dir string, structName string) (TemplateData, error) { // Grab the package name. var pkgName string - var pkg *ast.Package + var pkg *ast.Package //nolint:staticcheck for pkgName, pkg = range d { } td := TemplateData{ @@ -210,9 +211,7 @@ func GenerateMetricsFile(w io.Writer, td TemplateData) error { } func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.StructType, string, error) { - var ( - st *ast.StructType - ) + var st *ast.StructType for _, file := range files { mPkgName, err := extractMetricsPackageName(file.Imports) if err != nil { diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 2509e0cdbeb..16e205311f9 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -2,5 +2,5 @@ # # Invoke Mockery v2 to update generated mocks for the given type. -go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name "$*" +go run github.com/vektra/mockery/v2@latest --disable-version-string --case underscore --name "$*" diff --git a/scripts/qa/reporting/README.md b/scripts/qa/reporting/README.md index 2ae33380cbc..d8598e22145 100644 --- a/scripts/qa/reporting/README.md +++ b/scripts/qa/reporting/README.md @@ -69,7 +69,7 @@ Example: # cXrY_merged.png - Independent plot of experiments of configuration (c=X,r=Y) combined as single curve. # e_ID.png - independent plot with just experiment with id ID as a single curve. - +mkdir -p imgs python3 latency_plotter.py /path/to/csv/files/raw.csv ``` @@ -79,7 +79,8 @@ python3 latency_plotter.py /path/to/csv/files/raw.csv 2. Tweak the script to your needs 1. Adjust the time window 2. Select the right fork - 3. 
Tweak/add/remove metrics + 3. Select the right test case + 4. Tweak/add/remove metrics 3. Run the script as follows ```bash # Do the following while ensuring that the virtual environment is activated (see diff --git a/scripts/qa/reporting/latency_plotter.py b/scripts/qa/reporting/latency_plotter.py index 7d62287ef14..3b42eedff89 100644 --- a/scripts/qa/reporting/latency_plotter.py +++ b/scripts/qa/reporting/latency_plotter.py @@ -9,7 +9,7 @@ import numpy as np import pandas as pd -release = 'abci++vef_Smoke' +release = 'v0.38.0-alpha2' #FIXME: figure out in which timezone prometheus was running to adjust to UTC. tz = pytz.timezone('America/Sao_Paulo') @@ -56,21 +56,25 @@ paramGroups = group.groupby(['connections','rate']) for (subKey) in paramGroups.groups.keys(): subGroup = paramGroups.get_group(subKey) - startTime = subGroup['block_time'].min() - dt = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) - print('exp ' + key + ' starts at ' + dt.strftime("%Y-%m-%dT%H:%M:%SZ")) - subGroupMod = subGroup['block_time'].apply(lambda x: x - startTime) + startTime = subGroup.block_time.min() + endTime = subGroup.block_time.max() + localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) + localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) + subGroup.block_time.apply(lambda x: x - startTime ) + mean = subGroup.duration_ns.mean() + print('exp', key, 'start', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) (con,rate) = subKey label = 'c='+str(con) + ' r='+ str(rate) - ax.scatter(subGroupMod, subGroup.duration_ns, label=label) + ax.axhline(y = mean, color = 'r', linestyle = '-', label="mean") + ax.scatter(subGroup.block_time, subGroup.duration_ns, label=label) ax.legend() #Save individual axes extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - fig.savefig(os.path.join(path,'e_'+key + '.png'), bbox_inches=extent.expanded(1.2, 1.2)) + fig.savefig(os.path.join(path,'e_'+key + '.png'), bbox_inches=extent.expanded(1.2, 1.3)) -fig.suptitle('200-node testnet experiments - ' + release) +fig.suptitle('Vote Extensions Testnet - ' + release) # Save the figure with subplots fig.savefig(os.path.join(path,'all_experiments.png')) @@ -100,16 +104,18 @@ paramGroups = group.groupby(['experiment_id']) for (subKey) in paramGroups.groups.keys(): subGroup = paramGroups.get_group(subKey) - startTime = subGroup['block_time'].min() - subGroupMod = subGroup['block_time'].apply(lambda x: x - startTime) + startTime = subGroup.block_time.min() + subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) ax.scatter(subGroupMod, subGroup.duration_ns, label=label) #ax.legend() + + #Save individual axes extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - fig.savefig(os.path.join(path,'c'+str(con) + 'r'+ str(rate) + '.png'), bbox_inches=extent.expanded(1.2, 1.2)) + fig.savefig(os.path.join(path,'c'+str(con) + 'r'+ str(rate) + '.png'), bbox_inches=extent.expanded(1.2, 1.3)) + +fig.suptitle('Vote Extensions Testnet - ' + release) -fig.suptitle('200-node testnet configurations - ' + release) # Save the figure with subplots fig.savefig(os.path.join(path,'all_configs.png')) @@ -132,8 +138,8 @@ paramGroups = group.groupby(['experiment_id']) for (subKey) in paramGroups.groups.keys(): subGroup = paramGroups.get_group(subKey) - startTime = subGroup['block_time'].min() - subGroupMod = 
subGroup['block_time'].apply(lambda x: x - startTime) + startTime = subGroup.block_time.min() + subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) ax.scatter(subGroupMod, subGroup.duration_ns, marker='o',c='#1f77b4') #Save individual axes diff --git a/scripts/qa/reporting/latency_throughput.py b/scripts/qa/reporting/latency_throughput.py index adaa4b76ca2..cf890e0fd01 100755 --- a/scripts/qa/reporting/latency_throughput.py +++ b/scripts/qa/reporting/latency_throughput.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ A simple script to parse the CSV output from the loadtime reporting tool (see -https://github.com/cometbft/cometbft/tree/main/test/loadtime/cmd/report). +https://github.com/cometbft/cometbft/tree/v0.38.x/test/loadtime/cmd/report). Produces a plot of average transaction latency vs total transaction throughput according to the number of load testing tool WebSocket connections to the diff --git a/scripts/qa/reporting/prometheus_plotter.py b/scripts/qa/reporting/prometheus_plotter.py index e504441bf69..fbc62050f89 100644 --- a/scripts/qa/reporting/prometheus_plotter.py +++ b/scripts/qa/reporting/prometheus_plotter.py @@ -5,6 +5,7 @@ import matplotlib as mpl import matplotlib.pyplot as plt +import matplotlib.dates as md import numpy as np import pandas as pd @@ -14,16 +15,22 @@ from prometheus_pandas import query -release = 'v0.37.x-alpha3' +#release = 'v0.37.0-alpha.2' +release = 'v0.38.0-alpha.2' path = os.path.join('imgs') prometheus = query.Prometheus('http://localhost:9090') # Time window #window_size = dict(seconds=150) #CMT 0.37.x-alpha3 -window_size = dict(seconds=126) #TM v0.37 (200 nodes) baseline +#window_size = dict(seconds=126) #TM v0.37 (200 nodes) baseline +#window_size = dict(hours=1, minutes=28, seconds=25) #TM v0.37.0-alpha.2 (rotating) #window_size = dict(seconds=130) #homogeneous #window_size = dict(seconds=127) #baseline -ext_window_size = dict(seconds=180) +#window_size = dict(seconds=115) #CMT v0.38.0-alpha.2 (200 nodes) +#window_size = dict(hours=1, minutes=46) #CMT v0.38.0-alpha.2 (rotating) +window_size = dict(seconds=150) #CMT v0.38.0-alpha.2 (ve baseline) + +ext_window_size = dict(seconds=200) # Use the time provided by latency_plotter for the selected experiment. 
#left_end = '2023-02-08T13:12:20Z' #cmt2 tm1 @@ -31,23 +38,42 @@ #left_end = '2023-02-14T15:18:00Z' #cmt1 tm1 #left_end = '2023-02-07T18:07:00Z' #homogeneous #left_end = '2022-10-13T19:41:23Z' #baseline -#left_end = '2023-02-22T18:56:29Z' #CMT 0.37.x-alpha3 +#left_end = '2023-02-22T18:56:29Z' #CMT v0.37.x-alpha3 #left_end = '2022-10-13T15:57:50Z' #TM v0.37 (200 nodes) baseline -left_end = '2023-03-20T19:45:35Z' #feature/abci++vef merged with main (7d8c9d426) - -right_end = pd.to_datetime(left_end) + pd.Timedelta(**window_size) -time_window = (left_end, right_end.strftime('%Y-%m-%dT%H:%M:%SZ')) +#left_end = '2023-03-20T19:45:35Z' #feature/abci++vef merged with main (7d8c9d426) +#left_end = '2023-05-22T09:39:20Z' #CMT v0.38.0-alpha.2 - 200 nodes +#left_end = '2022-10-10T15:47:15Z' #TM v0.37.0-alpha.2 - rotating +#left_end = '2023-05-23T08:09:50Z' #CMT v0.38.0-alpha.2 - rotating + +#left_end = '2023-05-25T18:18:04Z' #CMT v0.38.0-alpha.2 - ve baseline +#left_end = '2023-05-30T19:05:32Z' #CMT v0.38.0-alpha.2 - ve 2k +left_end = '2023-05-30T20:44:46Z' #CMT v0.38.0-alpha.2 - ve 4k +#left_end = '2023-05-25T19:42:08Z' #CMT v0.38.0-alpha.2 - ve 8k +#left_end = '2023-05-26T00:28:12Z' #CMT v0.38.0-alpha.2 - ve 16k +#left_end = '2023-05-26T02:12:27Z' #CMT v0.38.0-alpha.2 - ve 32k + +useManualrightEnd = False +if useManualrightEnd: + #right_end = '2023-05-25T18:54:04Z' #CMT v0.38.0-alpha.2 - ve baseline + #right_end = '2023-05-30T19:40:41Z' #CMT v0.38.0-alpha.2 - ve 2k + right_end = '2023-05-30T21:15:37Z' #CMT v0.38.0-alpha.2 - ve 4k + #right_end = '2023-05-25T20:16:00Z' #CMT v0.38.0-alpha.2 - ve 8k + #right_end = '2023-05-26T01:01:57Z' #CMT v0.38.0-alpha.2 - ve 16k + #right_end = '2023-05-26T02:46:19Z' #CMT v0.38.0-alpha.2 - ve 32k + time_window = (left_end, right_end) +else: + right_end = pd.to_datetime(left_end) + pd.Timedelta(**window_size) + time_window = (left_end, right_end.strftime('%Y-%m-%dT%H:%M:%SZ')) ext_right_end = pd.to_datetime(left_end) + pd.Timedelta(**ext_window_size) ext_time_window = (left_end, ext_right_end.strftime('%Y-%m-%dT%H:%M:%SZ')) - fork='cometbft' #fork='tendermint' -# Do prometheus queries -queries = [ +# Do prometheus queries, depending on the test case +queries200Nodes = [ (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, kind='area',stacked=True), False), (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True), True), (( 'avg(' + fork + '_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True), False), @@ -69,23 +95,56 @@ (( 'rate(' + fork + '_consensus_total_txs[20s])*60', ext_time_window[0], ext_time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), ] -for (query, file_name, pandas_params, plot_average) in queries: +queriesRotating = [ + (( 'rate(' + fork + '_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'rotating_block_rate', dict(ylabel='blocks/min', xlabel='time', title='Rate of Block Creation', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(' + fork + '_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'rotating_txs_rate', 
dict(ylabel='TXs/min', xlabel='time', title='Rate of Transaction processing', legend=False, figsize=(10,6), grid=True), False), + (( fork + '_consensus_height{job=~"ephemeral.*"} or ' + fork + '_blocksync_latest_block_height{job=~"ephemeral.*"}', + time_window[0], time_window[1], '1s'), 'rotating_eph_heights', dict(ylabel='height', xlabel='time', title='Heights of Ephemeral Nodes', legend=False, figsize=(10,6), grid=True), False), + (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'rotating_peers', dict(ylabel='# peers', xlabel='time', title='Peers', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'rotating_avg_memory', dict(ylabel='memory (bytes)', xlabel='time', title='Average Memory Usage', legend=False, figsize=(10,6), grid=True), False), + (( 'node_load1', time_window[0], time_window[1], '1s'), 'rotating_cpu', dict(ylabel='load', xlabel='time', title='Node Load', legend=False, figsize=(10,6), grid=True), False), +] + +queriesVExtension= [ + (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, kind='area',stacked=True), False), + (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size_not_stacked', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, stacked=False), False), + (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True), True), + (( 'avg(' + fork + '_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True), False), + (( fork + '_consensus_rounds', time_window[0], time_window[1], '1s'), 'rounds', dict(ylabel='# Rounds', xlabel='time (s)', title='Rounds per block', legend=False, figsize=(10,6), grid=True), False), + (( 'process_resident_memory_bytes', time_window[0], time_window[1], '1s'), 'memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Memory usage', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'avg_memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Average Memory usage', legend=False, figsize=(10,6), grid=True), False), + (( 'node_load1', time_window[0], time_window[1], '1s'), 'cpu', dict(ylabel='Load', xlabel='time (s)', title='Node load', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(node_load1)', time_window[0], time_window[1], '1s'), 'avg_cpu', dict(ylabel='Load', xlabel='time (s)', title='Average Node load', legend=False, figsize=(10,6), grid=True), False), + (( fork + '_consensus_height', time_window[0], time_window[1], '1s'), 'blocks', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(' + fork + '_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'block_rate', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), + (( fork + '_consensus_total_txs', time_window[0], time_window[1], '1s'), 'total_txs', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(' + fork + 
'_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), +] + +#queries = queries200Nodes +#queries = queriesRotating +queries = queriesVExtension + + +for (query, file_name, pandas_params, plot_average) in queries: print(query) data_frame = prometheus.query_range(*query) #Tweak the x ticks - delta_index = pd.to_timedelta(data_frame.index.strftime('%H:%M:%S')) - data_frame = data_frame.set_index(delta_index) + data_frame = data_frame.set_index(md.date2num(data_frame.index)) + - data_frame.plot(**pandas_params) + pandas_params["title"] += " - " + release + ax = data_frame.plot(**pandas_params) if plot_average: average = data_frame.mean(axis=1) data_frame['__average__'] = average pandas_params['lw'] = 8 pandas_params['style'] = ['--'] pandas_params['color'] = ['red'] - data_frame['__average__'].plot(**pandas_params) + ax = data_frame['__average__'].plot(**pandas_params) + ax.xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S')) plt.savefig(os.path.join(path, file_name + '.png')) plt.plot() diff --git a/scripts/qa/reporting/requirements.txt b/scripts/qa/reporting/requirements.txt index f499ed1d36d..d7205cb5be1 100644 --- a/scripts/qa/reporting/requirements.txt +++ b/scripts/qa/reporting/requirements.txt @@ -5,10 +5,10 @@ kiwisolver==1.4.4 matplotlib==3.6.3 numpy==1.24.2 packaging==21.3 -Pillow==9.3.0 +Pillow==10.0.1 pyparsing==3.0.9 python-dateutil==2.8.2 six==1.16.0 pandas==1.5.3 prometheus-pandas==0.3.2 -requests==2.28.2 +requests==2.31.0 diff --git a/spec/README.md b/spec/README.md index 921c68b7cb0..04d2720e21c 100644 --- a/spec/README.md +++ b/spec/README.md @@ -14,7 +14,7 @@ and how they are communicated over the network. If you find discrepancies between the spec and the code that do not have an associated issue or pull request on github, -please submit them to our [bug bounty](https://cometbft.com/security)! +please submit them to our [bug bounty](https://github.com/cometbft/cometbft#security)! ## Contents @@ -35,12 +35,12 @@ please submit them to our [bug bounty](https://cometbft.com/security)! 
### P2P and Network Protocols -- [The Base P2P Layer](./p2p/node.md): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections -- [Peer Exchange (PEX)](./p2p/messages/pex.md): gossip known peer addresses so peers can find each other -- [Block Sync](./p2p/messages/block-sync.md): gossip blocks so peers can catch up quickly -- [Consensus](./p2p/messages/consensus.md): gossip votes and block parts so new blocks can be committed -- [Mempool](./p2p/messages/mempool.md): gossip transactions so they get included in blocks -- [Evidence](./p2p/messages/evidence.md): sending invalid evidence will stop the peer +- [The Base P2P Layer](./p2p/legacy-docs/node.md): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections +- [Peer Exchange (PEX)](./p2p/legacy-docs/messages/pex.md): gossip known peer addresses so peers can find each other +- [Block Sync](./p2p/legacy-docs/messages/block-sync.md): gossip blocks so peers can catch up quickly +- [Consensus](./p2p/legacy-docs/messages/consensus.md): gossip votes and block parts so new blocks can be committed +- [Mempool](./p2p/legacy-docs/messages/mempool.md): gossip transactions so they get included in blocks +- [Evidence](./p2p/legacy-docs/messages/evidence.md): sending invalid evidence will stop the peer ### RPC diff --git a/spec/abci/README.md b/spec/abci/README.md index 4c29cc547f8..dc2fc9eb5df 100644 --- a/spec/abci/README.md +++ b/spec/abci/README.md @@ -20,7 +20,7 @@ for handling all ABCI++ methods. Thus, CometBFT always sends the `Request*` messages and receives the `Response*` messages in return. -All ABCI++ messages and methods are defined in [protocol buffers](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto). +All ABCI++ messages and methods are defined in [protocol buffers](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto). This allows CometBFT to run with applications written in many programming languages. 
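Since the ABCI++ surface is plain protobuf messages, a Go application only has to implement the `Application` interface and expose it over one of the provided servers. A minimal skeleton, assuming the v0.38 Go API (`abcitypes` abbreviates `github.com/cometbft/cometbft/abci/types`; embedding `BaseApplication` supplies no-op defaults for every ABCI++ method):

```go
package main

import (
	"context"
	"os"

	abciserver "github.com/cometbft/cometbft/abci/server"
	abcitypes "github.com/cometbft/cometbft/abci/types"
	cmtlog "github.com/cometbft/cometbft/libs/log"
)

// DemoApp overrides only Info; BaseApplication answers everything else.
type DemoApp struct {
	abcitypes.BaseApplication
}

// Info is used by CometBFT during the handshake to learn the application's
// last committed state.
func (DemoApp) Info(_ context.Context, _ *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) {
	return &abcitypes.ResponseInfo{Data: "demo-app"}, nil
}

func main() {
	srv := abciserver.NewSocketServer("tcp://127.0.0.1:26658", &DemoApp{})
	srv.SetLogger(cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)))
	if err := srv.Start(); err != nil {
		panic(err)
	}
	select {} // serve until the process is killed
}
```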
This specification is split as follows: diff --git a/spec/abci/abci++_app_requirements.md b/spec/abci/abci++_app_requirements.md index 7ed84e83378..3c5c5e1c775 100644 --- a/spec/abci/abci++_app_requirements.md +++ b/spec/abci/abci++_app_requirements.md @@ -5,34 +5,59 @@ title: Requirements for the Application # Requirements for the Application -- [Formal Requirements](#formal-requirements) -- [Managing the Application state and related topics](#managing-the-application-state-and-related-topics) - - [Connection State](#connection-state) - - [Concurrency](#concurrency) - - [Finalize Block](#finalizeblock) - - [Commit](#commit) - - [Candidate States](#candidate-states) - - [States and ABCI++ Connections](#states-and-abci%2B%2B-connections) - - [Consensus Connection](#consensus-connection) - - [Mempool Connection](#mempool-connection) - - [Info/Query Connection](#infoquery-connection) - - [Snapshot Connection](#snapshot-connection) - - [Transaction Results](#transaction-results) - - [Updating the Validator Set](#updating-the-validator-set) - - [Consensus Parameters](#consensus-parameters) - - [List of Parameters](#list-of-parameters) - - [Updating Consensus Parameters](#updating-consensus-parameters) - - [Query](#query) - - [Query Proofs](#query-proofs) - - [Peer Filtering](#peer-filtering) - - [Paths](#paths) - - [Crash Recovery](#crash-recovery) - - [State Sync](#state-sync) -- [Application configuration required to switch to ABCI2.0](#application-configuration-required-to-switch-to-abci-20) +- [Requirements for the Application](#requirements-for-the-application) + - [Formal Requirements](#formal-requirements) + - [Consensus Connection Requirements](#consensus-connection-requirements) + - [Mempool Connection Requirements](#mempool-connection-requirements) + - [Managing the Application state and related topics](#managing-the-application-state-and-related-topics) + - [Connection State](#connection-state) + - [Concurrency](#concurrency) + - [FinalizeBlock](#finalizeblock) + - [Commit](#commit) + - [Candidate States](#candidate-states) + - [States and ABCI++ Connections](#states-and-abci-connections) + - [Consensus Connection](#consensus-connection) + - [Mempool Connection](#mempool-connection) + - [Replay Protection](#replay-protection) + - [Info/Query Connection](#infoquery-connection) + - [Snapshot Connection](#snapshot-connection) + - [Transaction Results](#transaction-results) + - [Gas](#gas) + - [Specifics of `ResponseCheckTx`](#specifics-of-responsechecktx) + - [Specifics of `ExecTxResult`](#specifics-of-exectxresult) + - [Updating the Validator Set](#updating-the-validator-set) + - [Consensus Parameters](#consensus-parameters) + - [List of Parameters](#list-of-parameters) + - [ABCIParams.VoteExtensionsEnableHeight](#abciparamsvoteextensionsenableheight) + - [BlockParams.MaxBytes](#blockparamsmaxbytes) + - [BlockParams.MaxGas](#blockparamsmaxgas) + - [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) + - [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) + - [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) + - [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) + - [VersionParams.App](#versionparamsapp) + - [Updating Consensus Parameters](#updating-consensus-parameters) + - [`InitChain`](#initchain) + - [`FinalizeBlock`, `PrepareProposal`/`ProcessProposal`](#finalizeblock-prepareproposalprocessproposal) + - [`Query`](#query) + - [Query Proofs](#query-proofs) + - [Peer Filtering](#peer-filtering) + - [Paths](#paths) + - [Crash Recovery](#crash-recovery) + 
- [State Sync](#state-sync) + - [Taking Snapshots](#taking-snapshots) + - [Bootstrapping a Node](#bootstrapping-a-node) + - [Snapshot Discovery](#snapshot-discovery) + - [Snapshot Restoration](#snapshot-restoration) + - [Snapshot Verification](#snapshot-verification) + - [Transition to Consensus](#transition-to-consensus) + - [Application configuration required to switch to ABCI 2.0](#application-configuration-required-to-switch-to-abci-20) ## Formal Requirements +### Consensus Connection Requirements + This section specifies what CometBFT expects from the Application. It is structured as a set of formal requirements that can be used for testing and verification of the Application's logic. @@ -49,7 +74,7 @@ returns via `ResponsePrepareProposal` to CometBFT, also known as the prepared pr Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer. -* Requirement 1 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in +- Requirement 1 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in `PrepareProposal` and the network is in a synchronous period while processes *p* and *q* are in *r<sub>p</sub>*, then the value of *TimeoutPropose* at *q* must be such that *q*'s propose timer does not time out (which would result in *q* prevoting `nil` in *r<sub>p</sub>*). Full execution of blocks at `PrepareProposal` time stands on CometBFT's critical path. Thus, Requirement 1 ensures the Application or operator will set a value for `TimeoutPropose` such that the time it takes to fully execute blocks in `PrepareProposal` does not interfere with CometBFT's propose timer. -Note that the violation of Requirement 1 may lead to further rounds, but will not +Note that the violation of Requirement 1 may lead to further rounds, but will not compromise liveness because even though `TimeoutPropose` is used as the initial value for proposal timeouts, CometBFT will dynamically adjust these timeouts such that they will eventually be enough for completing `PrepareProposal`. -* Requirement 2 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the +- Requirement 2 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`. -Busy blockchains might seek to maximize the amount of transactions included in each block. Under those conditions, -CometBFT might choose to increase the transactions passed to the Application via `RequestPrepareProposal.txs` -beyond the `RequestPrepareProposal.max_tx_bytes` limit. The idea is that, if the Application drops some of -those transactions, it can still return a transaction list whose byte size is as close to -`RequestPrepareProposal.max_tx_bytes` as possible. Thus, Requirement 2 ensures that the size in bytes of the -transaction list returned by the application will never cause the resulting block to go beyond its byte size -limit. +Busy blockchains might seek to gain full visibility into transactions in CometBFT's mempool, +rather than having visibility only on *a* subset of those transactions that fit in a block. +The application can do so by setting `ConsensusParams.Block.MaxBytes` to -1.
+This instructs CometBFT (a) to enforce the maximum possible value for `MaxBytes` (100 MB) at CometBFT level, +and (b) to provide *all* transactions in the mempool when calling `RequestPrepareProposal`. +Under these settings, the aggregated size of all transactions may exceed `RequestPrepareProposal.max_tx_bytes`. +Hence, Requirement 2 ensures that the size in bytes of the transaction list returned by the application will never +cause the resulting block to go beyond its byte size limit. -* Requirement 3 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, +- Requirement 3 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, if *q*'s CometBFT calls `RequestProcessProposal` on *u<sub>p</sub>*, *q*'s Application returns Accept in `ResponseProcessProposal`. @@ -86,12 +112,12 @@ likely hit the bug at the same time. This would result in most (or all) processe serious consequences on CometBFT's liveness that this entails. Due to its criticality, Requirement 3 is a target for extensive testing and automated verification. -* Requirement 4 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current +- Requirement 4 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *u*, if *p*'s CometBFT calls `RequestProcessProposal` on *u* at height *h*, then *p*'s Application's acceptance or rejection **exclusively** depends on *u* and *s<sub>p,h-1</sub>*. -* Requirement 5 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary +- Requirement 5 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary block *u*, if *p*'s (resp. *q*'s) CometBFT calls `RequestProcessProposal` on *u* at height *h*, then *p*'s Application accepts *u* if and only if *q*'s Application accepts *u*. @@ -119,7 +145,7 @@ Let *e<sup>r</sup><sub>p</sub>* be the vote extension that the Application of a Let *w<sup>r</sup><sub>p</sub>* be the proposed block that *p*'s CometBFT passes to the Application via `RequestExtendVote` in round *r*, height *h*. -* Requirement 6 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct +- Requirement 6 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct processes *p* and *q*, if *q* receives *e<sup>r</sup><sub>p</sub>* from *p* in height *h*, *q*'s Application returns Accept in `ResponseVerifyVoteExtension`. @@ -131,13 +157,13 @@ However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtensi we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote extensions will be discarded. -* Requirement 7 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of +- Requirement 7 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of the current state, the vote extension received, and the prepared proposal that the extension refers to. In other words, for any correct process *p*, and any arbitrary vote extension *e*, and any arbitrary block *w*, if *p*'s (resp. *q*'s) CometBFT calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, then *p*'s Application's acceptance or rejection **exclusively** depends on *e*, *w* and *s<sub>p,h-1</sub>*.
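Requirement 7 above and Requirement 8 below are easiest to satisfy by making verification a pure function of the request and of committed state. A sketch of such a handler, assuming the v0.38 Go ABCI interface (the size bound is an invented application-level rule, shown only to illustrate a deterministic check):

```go
package app

import (
	"context"

	abcitypes "github.com/cometbft/cometbft/abci/types"
)

type App struct {
	abcitypes.BaseApplication
	maxExtensionBytes int // part of the replicated, committed configuration
}

// VerifyVoteExtension inspects only the request and state shared by all
// correct processes, so any two correct processes reach the same verdict
// for the same extension and proposal (Requirements 7 and 8).
func (a *App) VerifyVoteExtension(
	_ context.Context,
	req *abcitypes.RequestVerifyVoteExtension,
) (*abcitypes.ResponseVerifyVoteExtension, error) {
	if len(req.VoteExtension) > a.maxExtensionBytes {
		return &abcitypes.ResponseVerifyVoteExtension{
			Status: abcitypes.ResponseVerifyVoteExtension_REJECT,
		}, nil
	}
	// Accept as a general rule: a rejection discards the whole Precommit
	// carrying the extension.
	return &abcitypes.ResponseVerifyVoteExtension{
		Status: abcitypes.ResponseVerifyVoteExtension_ACCEPT,
	}, nil
}
```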
-* Requirement 8 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, +- Requirement 8 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary vote extension *e*, and any arbitrary block *w*, if *p*'s (resp. *q*'s) CometBFT calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, then *p*'s Application accepts *e* if and only if *q*'s Application accepts *e*. @@ -152,12 +178,12 @@ Requirements 7 and 8 can be violated by a bug inducing non-determinism in Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension`. As a general rule, `VerifyVoteExtension` SHOULD always accept the vote extension. -* Requirement 9 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`, +- Requirement 9 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`, `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height *h* do not modify *s<sub>p,h-1</sub>*. -* Requirement 10 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, +- Requirement 10 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, and any vote extension *e* that *p* received at height *h*, the computation of *s<sub>p,h</sub>* does not depend on *e*. @@ -165,29 +191,63 @@ The call to correct process *p*'s `RequestFinalizeBlock` at height *h*, with blo passed as parameter, creates state *s<sub>p,h</sub>*. Additionally, *p*'s `FinalizeBlock` creates a set of transaction results *T<sub>p,h</sub>*. -* Requirement 11 [`FinalizeBlock`, determinism-1]: For any correct process *p*, +- Requirement 11 [`FinalizeBlock`, determinism-1]: For any correct process *p*, *s<sub>p,h</sub>* exclusively depends on *s<sub>p,h-1</sub>* and *v<sub>p,h</sub>*. -* Requirement 12 [`FinalizeBlock`, determinism-2]: For any correct process *p*, +- Requirement 12 [`FinalizeBlock`, determinism-2]: For any correct process *p*, the contents of *T<sub>p,h</sub>* exclusively depend on *s<sub>p,h-1</sub>* and *v<sub>p,h</sub>*. Note that Requirements 11 and 12, combined with the Agreement property of consensus ensure state machine replication, i.e., the Application state evolves consistently at all correct processes. -Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related +Also, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related requirements associated. Indeed, `PrepareProposal` is not required to be deterministic: -* *u<sub>p</sub>* may depend on *v<sub>p</sub>* and *s<sub>p,h-1</sub>*, but may also depend on other values or operations. -* *v<sub>p</sub> = v<sub>q</sub> ⇏ u<sub>p</sub> = u<sub>q</sub>*. +- *u<sub>p</sub>* may depend on *v<sub>p</sub>* and *s<sub>p,h-1</sub>*, but may also depend on other values or operations. +- *v<sub>p</sub> = v<sub>q</sub> ⇏ u<sub>p</sub> = u<sub>q</sub>*. Likewise, `ExtendVote` can also be non-deterministic: -* *e<sup>r</sup><sub>p</sub>* may depend on *w<sup>r</sup><sub>p</sub>* and *s<sub>p,h-1</sub>*, +- *e<sup>r</sup><sub>p</sub>* may depend on *w<sup>r</sup><sub>p</sub>* and *s<sub>p,h-1</sub>*, but may also depend on other values or operations. -* *w<sup>r</sup><sub>p</sub> = w<sup>r</sup><sub>q</sub> ⇏ +- *w<sup>r</sup><sub>p</sub> = w<sup>r</sup><sub>q</sub> ⇏ e<sup>r</sup><sub>p</sub> = e<sup>r</sup><sub>q</sub>* +### Mempool Connection Requirements + +Let *CheckTxCodes<sub>tx,p,h</sub>* denote the set of result codes returned by *p*'s Application, +via `ResponseCheckTx`, +to successive calls to `RequestCheckTx` occurring while the Application is at height *h* +and having transaction *tx* as parameter. +*CheckTxCodes<sub>tx,p,h</sub>* is a set since *p*'s Application may +return different result codes during height *h*. +If *CheckTxCodes<sub>tx,p,h</sub>* is a singleton set, i.e. the Application always returned +the same result code in `ResponseCheckTx` while at height *h*, +we define *CheckTxCode<sub>tx,p,h</sub>* as the singleton value of *CheckTxCodes<sub>tx,p,h</sub>*.
+If *CheckTxCodes<sub>tx,p,h</sub>* is not a singleton set, *CheckTxCode<sub>tx,p,h</sub>* is undefined.
+Let predicate *OK(CheckTxCode<sub>tx,p,h</sub>)* denote whether *CheckTxCode<sub>tx,p,h</sub>* is `SUCCESS`.
+
+- Requirement 13 [`CheckTx`, eventual non-oscillation]: For any transaction *tx*,
+  there exists a boolean value *b*,
+  and a height *h<sub>stable</sub>* such that,
+  for any correct process *p*,
+  *CheckTxCode<sub>tx,p,h</sub>* is defined, and
+  *OK(CheckTxCode<sub>tx,p,h</sub>) = b*
+  for any height *h ≥ h<sub>stable</sub>*.
+
+Requirement 13 ensures that
+a transaction will eventually stop oscillating between `CheckTx` success and failure
+if it stays in *p*'s mempool for long enough.
+This condition on the Application's behavior allows the mempool to ensure that
+a transaction will leave the mempool of all full nodes,
+either because it is expunged everywhere due to failing `CheckTx` calls,
+or because it stays valid long enough to be gossiped, proposed, and decided.
+Although Requirement 13 defines a global *h<sub>stable</sub>*, application developers
+can consider such a stabilization height as local to process *p* (*h<sub>p,stable</sub>*),
+without loss of generality.
+In contrast, the value of *b* MUST be the same across all processes.
+
## Managing the Application state and related topics

### Connection State

@@ -205,9 +265,9 @@ the state for each connection, which are synchronized upon `Commit` calls.

In principle, each of the four ABCI++ connections operates concurrently with one
another. This means applications need to ensure access to state is
thread safe. Both the
-[default in-process ABCI client](https://github.com/cometbft/cometbft/blob/main/abci/client/local_client.go#L13)
+[default in-process ABCI client](https://github.com/cometbft/cometbft/blob/v0.38.x/abci/client/local_client.go#L13)
and the
-[default Go ABCI server](https://github.com/cometbft/cometbft/blob/main/abci/server/socket_server.go#L20)
+[default Go ABCI server](https://github.com/cometbft/cometbft/blob/v0.38.x/abci/server/socket_server.go#L20)
use a global lock to guard the handling of events across all connections, so they are not
concurrent at all. This means whether your app is compiled in-process with
CometBFT using the `NewLocalClient`, or run out-of-process using the `SocketServer`,
@@ -216,20 +276,6 @@ time. The existence of this global mutex means Go application developers can get
thread safety for application state by routing all reads and writes through the
ABCI system. Thus it may be unsafe to expose application state directly to an
RPC interface, and unless explicit measures are taken, all queries should be
routed through the ABCI Query method.
-
-
-
-
-
#### FinalizeBlock

When the consensus algorithm decides on a block, CometBFT uses `FinalizeBlock` to send the
@@ -267,22 +313,22 @@ Likewise, CometBFT calls `ProcessProposal` upon reception of a proposed block fr
network.
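As a minimal reference for `PrepareProposal` and `ProcessProposal`, the following Go sketch (simplified stand-in types, not the generated ABCI messages) prepares a proposal by echoing the raw proposal and accepts any received proposal, which trivially satisfies the coherence expected by Requirement 3:

```go
package app

// Stand-in types for the ABCI request/response messages.
type RequestPrepareProposal struct {
	Txs [][]byte // raw proposal assembled by CometBFT
}

type ResponsePrepareProposal struct {
	Txs [][]byte
}

type RequestProcessProposal struct {
	Txs [][]byte
}

type ResponseProcessProposal struct {
	Accept bool
}

type BaselineApp struct{}

// PrepareProposal returns the raw proposal unchanged; any reordering,
// addition, or removal of transactions MAY happen here, and MAY be
// non-deterministic.
func (a *BaselineApp) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
	return ResponsePrepareProposal{Txs: req.Txs}
}

// ProcessProposal accepts any block that PrepareProposal could have
// produced; with the identity-style preparation above, that is any block,
// so the two handlers are trivially coherent. Its logic MUST stay
// deterministic in any real implementation.
func (a *BaselineApp) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
	return ResponseProcessProposal{Accept: true}
}
```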
The proposed block's data that is disclosed to the Application by these two methods is the following:

-* the transaction list
-* the `LastCommit` referring to the previous block
-* the block header's hash (except in `PrepareProposal`, where it is not known yet)
-* list of validators that misbehaved
-* the block's timestamp
-* `NextValidatorsHash`
-* Proposer address
+- the transaction list
+- the `LastCommit` referring to the previous block
+- the block header's hash (except in `PrepareProposal`, where it is not known yet)
+- list of validators that misbehaved
+- the block's timestamp
+- `NextValidatorsHash`
+- Proposer address

The Application may decide to *immediately* execute the given block (i.e., upon `PrepareProposal`
or `ProcessProposal`). There are two main reasons why the Application may want to do this:

-* *Avoiding invalid transactions in blocks*.
+- *Avoiding invalid transactions in blocks*.
  In order to be sure that the block does not contain *any* invalid transaction, there may be
  no way other than fully executing the transactions in the block as though it was the *decided* block.
-* *Quick `FinalizeBlock` execution*.
+- *Quick `FinalizeBlock` execution*.
  Upon reception of the decided block via `FinalizeBlock`, if that same block was executed
  upon `PrepareProposal` or `ProcessProposal` and the resulting state was kept in memory, the
  Application can simply apply that state (faster) to the main state, rather than reexecuting
@@ -305,7 +351,7 @@ to bound memory usage. As a general rule, the Application should be ready to dis
before `FinalizeBlock`, even if one of them might end up corresponding to the
decided block and thus have to be reexecuted upon `FinalizeBlock`.

-### States and ABCI++ Connections
+### [States and ABCI++ Connections](#states-and-abci-connections)

#### Consensus Connection

@@ -421,8 +467,8 @@ or validation should fail before it can use more resources than it requested.

When `MaxGas > -1`, CometBFT enforces the following rules:

-* `GasWanted <= MaxGas` for every transaction in the mempool
-* `(sum of GasWanted in a block) <= MaxGas` when proposing a block
+- `GasWanted <= MaxGas` for every transaction in the mempool
+- `(sum of GasWanted in a block) <= MaxGas` when proposing a block

If `MaxGas == -1`, no rules about gas are enforced.

@@ -440,7 +486,7 @@ it can use `PrepareProposal` and `ProcessProposal` to enforce that `(sum of GasW
in all proposed or prevoted blocks,
we have:

-* `(sum of GasUsed in a block) <= MaxGas` for every block
+- `(sum of GasUsed in a block) <= MaxGas` for every block

The `GasUsed` field is ignored by CometBFT.

@@ -501,21 +547,21 @@ duplicates, the block execution will fail irrecoverably.
Structure `ValidatorUpdate` contains a public key, which is used to identify the validator:
The public key currently supports three types:

-* `ed25519`
-* `secp256k1`
-* `sr25519`
+- `ed25519`
+- `secp256k1`
+- `sr25519`

Structure `ValidatorUpdate` also contains an `int64` field denoting the validator's new power.
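To make the structure concrete, here is a minimal Go sketch of assembling such updates. The types below are local stand-ins for the protobuf `ValidatorUpdate` message, and the function name is illustrative; the updates are subject to the rules listed next:

```go
package app

// Stand-in for the protobuf ValidatorUpdate message.
type PublicKey struct {
	Type string // "ed25519", "secp256k1", or "sr25519"
	Data []byte
}

type ValidatorUpdate struct {
	PubKey PublicKey
	Power  int64 // new voting power; 0 removes the validator from the set
}

// validatorUpdatesFor illustrates the semantics: a positive power adds the
// validator to the set or adjusts its power, while zero power removes it.
func validatorUpdatesFor(newKey, oldKey []byte) []ValidatorUpdate {
	return []ValidatorUpdate{
		{PubKey: PublicKey{Type: "ed25519", Data: newKey}, Power: 10}, // add or adjust
		{PubKey: PublicKey{Type: "ed25519", Data: oldKey}, Power: 0},  // remove
	}
}
```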
Applications must ensure that `ValidatorUpdate` structures abide by the following rules: -* power must be non-negative -* if power is set to 0, the validator must be in the validator set; it will be removed from the set -* if power is greater than 0: - * if the validator is not in the validator set, it will be added to the +- power must be non-negative +- if power is set to 0, the validator must be in the validator set; it will be removed from the set +- if power is greater than 0: + - if the validator is not in the validator set, it will be added to the set with the given power - * if the validator is in the validator set, its power will be adjusted to the given power -* the total power of the new validator set must not exceed `MaxTotalVotingPower`, where + - if the validator is in the validator set, its power will be adjusted to the given power +- the total power of the new validator set must not exceed `MaxTotalVotingPower`, where `MaxTotalVotingPower = MaxInt64 / 8` Note the updates returned after processing the block at height `H` will only take effect @@ -534,35 +580,71 @@ all full nodes have the same value at a given height. #### List of Parameters -These are the current consensus parameters (as of v0.37.x): - -1. [BlockParams.MaxBytes](#blockparamsmaxbytes) -2. [BlockParams.MaxGas](#blockparamsmaxgas) -3. [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) -4. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) -5. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) -6. [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) -7. [VersionParams.App](#versionparamsapp) - +These are the current consensus parameters (as of v0.38.x): + +1. [ABCIParams.VoteExtensionsEnableHeight](#abciparamsvoteextensionsenableheight) +2. [BlockParams.MaxBytes](#blockparamsmaxbytes) +3. [BlockParams.MaxGas](#blockparamsmaxgas) +4. [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) +5. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) +6. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) +7. [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) +8. [VersionParams.App](#versionparamsapp) + +##### ABCIParams.VoteExtensionsEnableHeight + +This parameter is either 0 or a positive height at which vote extensions +become mandatory. If the value is zero (which is the default), vote +extensions are not expected. Otherwise, at all heights greater than the +configured height `H` vote extensions must be present (even if empty). +When the configured height `H` is reached, `PrepareProposal` will not +include vote extensions yet, but `ExtendVote` and `VerifyVoteExtension` will +be called. Then, when reaching height `H+1`, `PrepareProposal` will +include the vote extensions from height `H`. For all heights after `H` + +- vote extensions cannot be disabled, +- they are mandatory: all precommit messages sent MUST have an extension + attached. Nevertheless, the application MAY provide 0-length + extensions. + +Must always be set to a future height, 0, or the same height that was previously set. +Once the chain's height reaches the value set, it cannot be changed to a different value. ##### BlockParams.MaxBytes The maximum size of a complete Protobuf encoded block. This is enforced by the consensus algorithm. -This implies a maximum transaction size that is this `MaxBytes`, less the expected size of +This implies a maximum transaction size that is `MaxBytes`, less the expected size of the header, the validator set, and any included evidence in the block. 
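Complementing this consensus-level bound, an Application may enforce its own byte limit, a possibility discussed just below. A minimal Go sketch of such an Application-level budget follows; the constant value and function names are hypothetical:

```go
package app

// appMaxBlockBytes is a hypothetical Application-level limit on the total
// size of the transactions in a block.
const appMaxBlockBytes int64 = 1 << 20 // 1 MB, for illustration only

// boundTxs keeps a prefix of txs whose total size fits the limit; a
// PrepareProposal implementation can use it to bound what it returns.
func boundTxs(txs [][]byte, limit int64) [][]byte {
	var total int64
	out := make([][]byte, 0, len(txs))
	for _, tx := range txs {
		if total+int64(len(tx)) > limit {
			break
		}
		total += int64(len(tx))
		out = append(out, tx)
	}
	return out
}

// withinLimit reports whether a received block's transactions respect the
// limit; a ProcessProposal implementation can reject blocks that exceed it.
func withinLimit(txs [][]byte, limit int64) bool {
	var total int64
	for _, tx := range txs {
		total += int64(len(tx))
	}
	return total <= limit
}
```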
-Must have `0 < MaxBytes < 100 MB`.
+The Application should be aware that honest validators *may* produce and
+broadcast blocks with up to the configured `MaxBytes` size.
+As a result, the consensus
+[timeout parameters](../../docs/core/configuration.md#consensus-timeouts-explained)
+adopted by nodes should be configured so as to account for the worst-case
+latency for the delivery of a full block with `MaxBytes` size to all validators.
+
+If the Application wants full control over the size of blocks,
+it can do so by enforcing a byte limit set up at the Application level.
+This Application-internal limit is used by `PrepareProposal` to bound the total size
+of transactions it returns, and by `ProcessProposal` to reject any received block
+whose total transaction size is bigger than the enforced limit.
+In such case, the Application MAY set `MaxBytes` to -1.
+
+If the Application sets value -1, consensus will:
+
+- consider that the actual value to enforce is 100 MB
+- provide *all* transactions in the mempool in calls to `PrepareProposal`
+
+Must have `MaxBytes == -1` OR `0 < MaxBytes <= 100 MB`.
+
+> Bear in mind that the default value for the `BlockParams.MaxBytes` consensus
+> parameter accepts blocks of up to 21 MB in size as valid.
+> If the Application's use case does not need blocks of that size,
+> or if the impact (especially on bandwidth consumption and block latency)
+> of propagating blocks of that size was not evaluated,
+> it is strongly recommended to lower this default value.

##### BlockParams.MaxGas

@@ -614,103 +696,6 @@ The parameter restricts the type of keys validators can use. The parameter uses

##### VersionParams.App

This is the version of the ABCI application.
-
-
-##### ABCIParams.VoteExtensionsEnableHeight
-
-This parameter is either 0 or a positive height at which vote extensions
-become mandatory. If the value is zero (which is the default), vote
-extensions are not required. Otherwise, at all heights greater than the
-configured height `H` vote extensions must be present (even if empty).
-When the configured height `H` is reached, `PrepareProposal` will not
-include vote extensions yet, but `ExtendVote` and `VerifyVoteExtension` will
-be called. Then, when reaching height `H+1`, `PrepareProposal` will
-include the vote extensions from height `H`. For all heights after `H`
-
-* vote extensions cannot be disabled,
-* they are mandatory: all precommit messages sent MUST have an extension
-  attached. Nevertheless, the application MAY provide 0-length
-  extensions.
-
-Must always be set to a future height. Once set to a value different from
-0, its value must not be changed.

#### Updating Consensus Parameters

@@ -777,9 +762,9 @@ For such applications, the `AppHash` provides a much more efficient way to verif

ABCI applications can take advantage of more efficient light-client proofs for
their state as follows:

-* return the Merkle root of the deterministic application state in
+- return the Merkle root of the deterministic application state in
  `ResponseFinalizeBlock.Data`. This Merkle root will be included as the `AppHash` in the next block.
-* return efficient Merkle proofs about that application state in `ResponseQuery.Proof`
+- return efficient Merkle proofs about that application state in `ResponseQuery.Proof`
  that can be verified using the `AppHash` of the corresponding block.

For instance, this allows an application's light-client to verify proofs of
@@ -814,9 +799,9 @@ the list should match the `AppHash` being verified against.
When CometBFT connects to a peer, it sends two queries to the ABCI application
using the following paths, with no additional data:

-* `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
+- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
  the port of the connection
-* `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (i.e. the
+- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (i.e. the
  pubkey.Address() for the peer's PubKey)

If either of these queries returns a non-zero ABCI code, CometBFT will refuse
@@ -834,33 +819,34 @@ implementation of

### Crash Recovery

-CometBFT and the application are expected to crash together and there should not
+CometBFT and the application are expected to crash together and there should not
exist a scenario where the application has persisted state of a height greater than the
latest height persisted by CometBFT.

-In practice, persisting the state of a height consists of three steps, the last of which
+In practice, persisting the state of a height consists of three steps, the last of which
is the call to the application's `Commit` method, the only place where the application is expected to
persist/commit its state.
On startup (upon recovery), CometBFT calls the `Info` method on the Info Connection to get the latest
committed state of the app. The app MUST return information consistent with the
-last block for which it successfully completed `Commit`.
+last block for which it successfully completed `Commit`.
+
+The three steps performed before the state of a height is considered persisted are:

-The three steps performed before the state of a height is considered persisted are:
- The block is stored by CometBFT in the blockstore
- CometBFT has stored the state returned by the application through `FinalizeBlockResponse`
-- The application has committed its state within `Commit`.
-
+- The application has committed its state within `Commit`.
+
The following diagram depicts the order in which these events happen, and the corresponding
ABCI functions that are called and executed by CometBFT and the application:

-```
+```
APP:              Execute block                                Persist application state
                 /    return ResultFinalizeBlock              /
-                /                                            /
+                /                                            /
Event: ------------- block_stored ------------ / ------------ state_stored --------------- / ----- app_persisted_state
          |                                   /        |                                  /           |
-CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results ---------- Call Commit --
+CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results ---------- Call Commit --
           on            in the                              (txResults, validator
          Block        block store                             updates...)
@@ -868,26 +854,27 @@ CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results -----

As these three steps are not atomic, we observe different cases based on which steps have been executed
before the crash occurred
-(we assume that at least `block_stored` has been executed, otherwise, there is no state persisted,
+(we assume that at least `block_stored` has been executed, otherwise, there is no state persisted,
and the operations for this height are repeated entirely):

- `block_stored`: we replay `FinalizeBlock` and the steps afterwards.
- `block_stored` and `state_stored`: As the app did not persist its state within `Commit`, we need to re-execute
-  `FinalizeBlock` to retrieve the results and compare them to the state stored by CometBFT within `state_stored`.
+  `FinalizeBlock` to retrieve the results and compare them to the state stored by CometBFT within `state_stored`.
  The expected case is that the states will match, otherwise CometBFT panics.
-- `block_stored`, `state_stored`, `app_persisted_state`: we move on to the next height.
+- `block_stored`, `state_stored`, `app_persisted_state`: we move on to the next height.

Based on the sequence of these events, CometBFT will panic if any of the steps in the sequence happen out of order,
-that is if:
+that is if:
+
- The application has persisted a block at a height higher than the block saved during `state_stored`.
- The `block_stored` step persisted a block at a height smaller than the one saved during `state_stored`
-- And the difference between the heights of the blocks persisted by `state_stored` and `block_stored` is more
+- And the difference between the heights of the blocks persisted by `state_stored` and `block_stored` is more
  than 1 (this corresponds to a scenario where we stored two blocks in the block store but never persisted the state of
  the first block, which should never happen).

-A special case is when a crash happens before the first block is committed - that is, after calling
+A special case is when a crash happens before the first block is committed - that is, after calling
`InitChain`. In that case, the application's state should still be at height 0 and thus `InitChain`
-will be called again.
+will be called again.

### State Sync

@@ -913,20 +900,20 @@ Applications that want to support state syncing must take state snapshots at reg
this is accomplished is entirely up to the application. A snapshot consists of some metadata and a
set of binary chunks in an arbitrary format:

-* `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given
+- `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given
  height has been committed, and must not contain data from any later heights.
-* `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot
+- `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot
  formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use
  this when restoring to choose whether to accept or reject a snapshot.
-* `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary
+- `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary
  data, and should be less than 16 MB; 10 MB is a good starting point.
-* `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is
+- `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is
  the same across nodes when downloading chunks.
-* `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other
+- `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other
  necessary info.

For a snapshot to be considered the same across nodes, all of these fields must be identical. When
@@ -937,14 +924,14 @@ application via the ABCI `ListSnapshots` method to discover available snapshots,
snapshot chunks via `LoadSnapshotChunk`. The application is free to choose how to implement this and
which formats to use, but must provide the following guarantees:

-* **Consistent:** A snapshot must be taken at a single isolated height, unaffected by
+- **Consistent:** A snapshot must be taken at a single isolated height, unaffected by
  concurrent writes. This can be accomplished by using a data store that supports ACID
  transactions with snapshot isolation.
-* **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress,
+- **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress,
  for example by running in a separate thread.
-* **Deterministic:** A snapshot taken at the same height in the same format must be identical
+- **Deterministic:** A snapshot taken at the same height in the same format must be identical
  (at the byte level) across nodes, including all metadata. This ensures good availability of
  chunks, and that they fit together across nodes.

@@ -1029,17 +1016,18 @@ Once the snapshots have all been restored, CometBFT gathers additional informati
bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers)
from the genesis file and light client RPC servers. It also calls `Info` to verify the following:

-* that the app hash from the snapshot it has delivered to the Application matches the apphash
+- that the app hash from the snapshot it has delivered to the Application matches the app hash
  stored in the next height's block
+
-* that the version that the Application returns in `ResponseInfo` matches the version in the
+- that the version that the Application returns in `ResponseInfo` matches the version in the
  current height's block header

Once the state machine has been restored and CometBFT has gathered this additional
-information, it transitions to consensus. As of ABCI 2.0, CometBFT ensures the neccessary conditions
-to switch are met [RFC-100](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history).
-From the application's point of view, these operations are transparent, unless the application has just upgraded to ABCI 2.0.
+information, it transitions to consensus. As of ABCI 2.0, CometBFT ensures the necessary conditions
+to switch are met [RFC-100](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history).
+From the application's point of view, these operations are transparent, unless the application has just upgraded to ABCI 2.0.
In that case, the application needs to be properly configured and aware of certain constraints in terms of when
-to provide vote extensions. More details can be found in the section below.
+to provide vote extensions. More details can be found in the section below.

Once a node switches to consensus, it operates like any other node, apart from having a truncated block history at the height of the restored snapshot.

@@ -1047,21 +1035,22 @@ Once a node switches to consensus, it operates like any other node, apart from h

Introducing vote extensions requires changes to the configuration of the application.

-First of all, switching to a version of CometBFT with vote extensions, requires a coordinated upgrade.
-For a detailed description on the upgrade path, please refer to the corresponding
-[section](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#upgrade-path) in RFC-100.
+First of all, switching to a version of CometBFT with vote extensions requires a coordinated upgrade.
+For a detailed description of the upgrade path, please refer to the corresponding
+[section](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/rfc/rfc-100-abci-vote-extension-propag.md#upgrade-path) in RFC-100.
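The consensus parameter driving this switch is described next. As an illustration only, here is a Go sketch of scheduling the switch through a consensus-parameter update; the types are local stand-ins for the protobuf consensus-parameter messages, and the helper name is hypothetical:

```go
package app

import "fmt"

// Stand-ins for the consensus-parameter update an application can return,
// e.g., from InitChain or FinalizeBlock.
type ABCIParams struct {
	VoteExtensionsEnableHeight int64
}

type ConsensusParamUpdates struct {
	ABCI *ABCIParams
}

// scheduleVoteExtensions returns an update enabling vote extensions at
// enableHeight, which MUST be strictly greater than the current height.
func scheduleVoteExtensions(currentHeight, enableHeight int64) (*ConsensusParamUpdates, error) {
	if enableHeight <= currentHeight {
		return nil, fmt.Errorf("enable height %d must be in the future (current height %d)",
			enableHeight, currentHeight)
	}
	return &ConsensusParamUpdates{
		ABCI: &ABCIParams{VoteExtensionsEnableHeight: enableHeight},
	}, nil
}
```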
-There is a newly introduced [**consensus parameter**](./abci%2B%2B_app_requirements.md#abciparamsvoteextensionsenableheight): `VoteExtensionsEnableHeight`.
-This parameter represents the height at which vote extensions are
+There is a newly introduced [**consensus parameter**](./abci%2B%2B_app_requirements.md#abciparamsvoteextensionsenableheight): `VoteExtensionsEnableHeight`.
+This parameter represents the height at which vote extensions are
required for consensus to proceed, with 0 being the default value (no vote extensions).
A chain can enable vote extensions either:
+
-* at genesis by setting `VoteExtensionsEnableHeight` to be equal, e.g., to the `InitialHeight`
-* or via the application logic by changing the `ConsensusParam` to configure the
+- at genesis by setting `VoteExtensionsEnableHeight` to be equal, e.g., to the `InitialHeight`
+- or via the application logic by changing the `ConsensusParam` to configure the
  `VoteExtensionsEnableHeight`.

Once the (coordinated) upgrade to ABCI 2.0 has taken place, at height *h<sub>u</sub>*,
the value of `VoteExtensionsEnableHeight` MAY be set to some height, *h<sub>e</sub>*,
-which MUST be higher than the current height of the chain. Thus the earliest value for
+which MUST be higher than the current height of the chain. Thus the earliest value for
*h<sub>e</sub>* is *h<sub>u</sub>* + 1.

Once a node reaches the configured height,
@@ -1073,7 +1062,7 @@ Likewise, for all heights *h < h<sub>e</sub>*, any precommit messages that *do*
will also be rejected as malformed.
Height *h<sub>e</sub>* is somewhat special, as calls to `PrepareProposal` MUST NOT have
vote extension data, but all precommit votes in that height MUST carry a vote extension,
-even if the extension is `nil`.
+even if the extension is `nil`.
Height *h<sub>e</sub> + 1* is the first height for which `PrepareProposal` MUST have vote
extension data and all precommit votes in that height MUST have a vote extension.

diff --git a/spec/abci/abci++_basic_concepts.md b/spec/abci/abci++_basic_concepts.md
index 08675aeffa0..3ed7e418e5b 100644
--- a/spec/abci/abci++_basic_concepts.md
+++ b/spec/abci/abci++_basic_concepts.md
@@ -6,25 +6,25 @@ title: Overview and basic concepts

## Outline

- [Overview and basic concepts](#overview-and-basic-concepts)
-  - [ABCI++ vs. ABCI](#abci-vs-abci)
-  - [Method overview](#method-overview)
-    - [Consensus/block execution methods](#consensusblock-execution-methods)
-    - [Mempool methods](#mempool-methods)
-    - [Info methods](#info-methods)
-    - [State-sync methods](#state-sync-methods)
-    - [Other methods](#other-methods)
-  - [Proposal timeout](#proposal-timeout)
-  - [Deterministic State-Machine Replication](#deterministic-state-machine-replication)
-  - [Events](#events)
-  - [Evidence](#evidence)
-  - [Errors](#errors)
-    - [`CheckTx`](#checktx)
-    - [`ExecTxResult` (as part of `FinalizeBlock`)](#exectxresult-as-part-of-finalizeblock)
-    - [`Query`](#query)
+  - [ABCI++ vs. ABCI](#abci-vs-abci)
+  - [Methods overview](#methods-overview)
+    - [Consensus/block execution methods](#consensusblock-execution-methods)
+    - [Mempool methods](#mempool-methods)
+    - [Info methods](#info-methods)
+    - [State-sync methods](#state-sync-methods)
+    - [Other methods](#other-methods)
+  - [Proposal timeout](#proposal-timeout)
+  - [Deterministic State-Machine Replication](#deterministic-state-machine-replication)
+  - [Events](#events)
+  - [Evidence](#evidence)
+  - [Errors](#errors)
+    - [`CheckTx`](#checktx)
+    - [`ExecTxResult` (as part of `FinalizeBlock`)](#exectxresult-as-part-of-finalizeblock)
+    - [`Query`](#query)

# Overview and basic concepts

-## ABCI++ vs. ABCI
+## ABCI 2.0 vs. ABCI {#abci-vs-abci}

[↑ Back to Outline](#outline)

@@ -40,19 +40,18 @@ as the Application cannot require validators to do more than executing the trans
finalized blocks. This includes features such as threshold cryptography, and guaranteed IBC connection attempts.

-ABCI++ addresses these limitations by allowing the application to intervene at three key places of
+ABCI 2.0 addresses these limitations by allowing the application to intervene at three key places of
consensus execution: (a) at the moment a new proposal is to be created, (b) at the moment a
-proposal is to be validated, and (c) at the moment a (precommit) vote is sent/received.
+proposal is to be validated, and (c) at the moment a (precommit) vote is sent/received.
The new interface allows block proposers to perform application-dependent work in a block through the
`PrepareProposal` method (a); and validators to perform application-dependent work
-and checks in a proposed block through the `ProcessProposal` method (b); and applications to require their validators
-to do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtensions` methods (c).
+and checks in a proposed block through the `ProcessProposal` method (b); and applications to require their validators
+to do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtension` methods (c).

Furthermore, ABCI 2.0 coalesces {`BeginBlock`, [`DeliverTx`], `EndBlock`} into `FinalizeBlock`, as a
simplified, efficient way to deliver a decided block to the Application.

-## Method overview
-
+## Methods overview

[↑ Back to Outline](#outline)

@@ -60,12 +59,12 @@ Methods can be classified into four categories: *consensus*, *mempool*, *info*,

### Consensus/block execution methods

-The first time a new blockchain is started, CometBFT calls `InitChain`. From then on, method
+The first time a new blockchain is started, CometBFT calls `InitChain`. From then on, method
`FinalizeBlock` is executed upon the decision of each block, resulting in an updated Application
state. During the execution of an instance of consensus, which decides the block for a given
height, and before method `FinalizeBlock` is called, methods `PrepareProposal`, `ProcessProposal`,
`ExtendVote`, and `VerifyVoteExtension` may be called several times. See
-[CometBFT's expected behavior](abci++_comet_expected_behavior.md) for details on the possible
+[CometBFT's expected behavior](./abci++_comet_expected_behavior.md) for details on the possible
call sequences of these methods.

- [**InitChain:**](./abci++_methods.md#initchain) This method initializes the blockchain.
@@ -75,29 +74,29 @@ call sequences of these methods.

- [**PrepareProposal:**](./abci++_methods.md#prepareproposal) It allows the block
  proposer to perform application-dependent work in a block before proposing it.
  This enables, for instance, batch optimizations to a block, which has been empirically
  demonstrated to be a key component for improved performance. Method `PrepareProposal` is called
-  every time CometBFT is about to broadcast a Proposal message and _validValue_ is `nil`.
+  every time CometBFT is about to broadcast a Proposal message and *validValue* is `nil`.
  CometBFT gathers outstanding transactions from the
  mempool, generates a block header, and uses them to create a block to propose. Then, it calls
  `RequestPrepareProposal` with the newly created proposal, called *raw proposal*.
The Application - can make changes to the raw proposal, such as modifying the set of transactions or the order - in which they appear, and returns the - (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` - call. The logic modifying the raw proposal can be non-deterministic. + can make changes to the raw proposal, such as reordering, adding and removing transactions, before returning the + (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal`. + The logic modifying the raw proposal MAY be non-deterministic. - [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. This enables features such as immediate block execution, and allows the Application to reject invalid blocks. - CometBFT calls it when it receives a proposal and _validValue_ is `nil`. - The Application cannot modify the proposal at this point but can reject it if it is + CometBFT calls it when it receives a proposal and *validValue* is `nil`. + The Application cannot modify the proposal at this point but can reject it if invalid. If that is the case, the consensus algorithm will prevote `nil` on the proposal, which has strong liveness implications for CometBFT. As a general rule, the Application SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of the proposal is invalid (e.g., an invalid transaction); the Application can ignore the invalid part of the prepared proposal at block execution time. + The logic in `ProcessProposal` MUST be deterministic. -- [**ExtendVote:**](./abci++_methods.md#extendvote) It allows applications to force their - validators to do more than just validate within consensus. `ExtendVote` allows applications to +- [**ExtendVote:**](./abci++_methods.md#extendvote) It allows applications to let their + validators do more than just validate within consensus. `ExtendVote` allows applications to include non-deterministic data, opaque to the consensus algorithm, to precommit messages (the final round of voting). The data, called *vote extension*, will be broadcast and received together with the vote it is extending, and will be made available to the Application in the next height, @@ -105,6 +104,7 @@ call sequences of these methods. CometBFT calls `ExtendVote` when the consensus algorithm is about to send a non-`nil` precommit message. If the Application does not have vote extension information to provide at that time, it returns a 0-length byte array as its vote extension. + The logic in `ExtendVote` MAY be non-deterministic. - [**VerifyVoteExtension:**](./abci++_methods.md#verifyvoteextension) It allows validators to validate the vote extension data attached to a precommit message. If the validation @@ -112,16 +112,19 @@ call sequences of these methods. This has a negative impact on liveness, i.e., if vote extensions repeatedly cannot be verified by correct validators, the consensus algorithm may not be able to finalize a block even if sufficiently many (+2/3) validators send precommit votes for that block. Thus, `VerifyVoteExtension` - should be used with special care. + should be implemented with special care. As a general rule, an Application that detects an invalid vote extension SHOULD accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. CometBFT calls it when - a process receives a precommit message with a (possibly empty) vote extension. 
+  a process receives a precommit message with a (possibly empty) vote extension, for the current height.
+  It is not called for precommit votes received after the height is concluded, while the node is still
+  waiting to accumulate additional precommit votes.
+  The logic in `VerifyVoteExtension` MUST be deterministic.

- [**FinalizeBlock:**](./abci++_methods.md#finalizeblock) It delivers a decided block to the
  Application. The Application must execute the transactions in the block deterministically and
  update its state accordingly. Cryptographic commitments to the block and transaction results,
  returned via the corresponding parameters in `ResponseFinalizeBlock`, are included in the header
  of the next block. CometBFT calls it when a new block is decided.
+  When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees
+  that at least one non-Byzantine validator has run `ProcessProposal` on that block.

- [**Commit:**](./abci++_methods.md#commit) Instructs the Application to persist its state.
  It is a fundamental part of CometBFT's crash-recovery mechanism that ensures the
@@ -152,7 +155,7 @@ call sequences of these methods.

State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying
state machine (application) snapshots instead of replaying historical blocks. For more details, see the
-[state sync documentation](../p2p/messages/state-sync.md).
+[state sync documentation](../p2p/legacy-docs/messages/state-sync.md).

New nodes discover and request snapshots from other nodes in the P2P network.
A CometBFT node that receives a request for snapshots from a peer will call
@@ -202,23 +205,26 @@ More details on managing state across connections can be found in the section on

## Proposal timeout

-Immediate execution requires the Application to fully execute the prepared block
-before returning from `PrepareProposal`, this means that CometBFT cannot make progress
-during the block execution.
-This stands on the consensus algorithm critical path: if the Application takes a long time
-executing the block, the default value of *TimeoutPropose* might not be sufficient
-to accommodate the long block execution time and non-proposer nodes might time
-out and prevote `nil`. The proposal, in this case, will probably be rejected and a new round will be necessary.
-
+`PrepareProposal` stands on the consensus algorithm critical path,
+i.e., CometBFT cannot make progress while this method is being executed.
+Hence, if the Application takes a long time preparing a proposal,
+the default value of *TimeoutPropose* might not be sufficient
+to accommodate the method's execution and validator nodes might time out and prevote `nil`.
+The proposal, in this case, will probably be rejected and a new round will be necessary.

-Operators will need to adjust the default value of *TimeoutPropose* in CometBFT's configuration file,
+Timeouts are automatically increased for each new round of a height and, if the execution of `PrepareProposal` is bounded, eventually *TimeoutPropose* will be long enough to accommodate the execution of `PrepareProposal`.
+However, relying on this self-adaptation could lead to performance degradation and, therefore,
+operators are advised to adjust the initial value of *TimeoutPropose* in CometBFT's configuration file,
in order to suit the needs of the particular application being deployed.
+This is particularly important if applications implement *immediate execution*.
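Since immediate execution recurs throughout this document, here is a hedged Go sketch of the usual caching pattern it entails. The types and names are stand-ins, and the key invariant, stated elsewhere in this specification, is that candidate states must never replace the committed state before the block is decided:

```go
package app

// State is a stand-in for the application's state representation.
type State struct{ /* application-specific */ }

type CachingApp struct {
	committed  State            // last committed state
	candidates map[string]State // candidate states keyed by block hash
}

func NewCachingApp() *CachingApp {
	return &CachingApp{candidates: map[string]State{}}
}

// executeCandidate runs a block against a copy of the committed state and
// caches the result; called from PrepareProposal or ProcessProposal.
func (app *CachingApp) executeCandidate(blockHash string, txs [][]byte) {
	st := app.committed // work on a copy; the committed state stays untouched
	for _, tx := range txs {
		applyTx(&st, tx) // deterministic execution
	}
	app.candidates[blockHash] = st
}

// FinalizeBlock reuses the cached candidate if this block was already
// executed, instead of re-executing it.
func (app *CachingApp) FinalizeBlock(blockHash string, txs [][]byte) {
	if _, ok := app.candidates[blockHash]; !ok {
		app.executeCandidate(blockHash, txs)
	}
	app.committed = app.candidates[blockHash] // durably persisted later, in Commit
	app.candidates = map[string]State{}       // discard the other candidates
}

func applyTx(st *State, tx []byte) { /* application-specific, deterministic */ }
```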
+To implement this technique, proposers need to execute the block being proposed within `PrepareProposal`, which could take longer than *TimeoutPropose*.
+
## Deterministic State-Machine Replication

[↑ Back to Outline](#outline)

-ABCI++ applications must implement deterministic finite-state machines to be
+ABCI applications must implement deterministic finite-state machines to be
securely replicated by the CometBFT consensus engine. This means block execution
must be strictly deterministic: given the same
ordered set of transactions, all nodes will compute identical responses, for all
@@ -233,15 +239,20 @@ from block execution (`FinalizeBlock` calls), and not through any other kind of
request. This is the only way to ensure all nodes see the same transactions and
compute the same results.

-Some Applications may choose to implement immediate execution, which entails executing the blocks
-that are about to be proposed (via `PrepareProposal`), and those that the Application is asked to
-validate (via `ProcessProposal`). However, the state changes caused by processing those
+Applications that implement immediate execution (execute the blocks
+that are about to be proposed, in `PrepareProposal`, or that require validation, in `ProcessProposal`) produce a new candidate state before a block is decided.
+The state changes caused by processing those
proposed blocks must never replace the previous state until `FinalizeBlock` confirms
-the block decided.
+that the proposed block was decided and `Commit` is invoked for it.
+
+The same is true for Applications that quickly accept blocks and execute the
+blocks optimistically in parallel with the remaining consensus steps to save
+time during `FinalizeBlock`; they must only apply state changes in `Commit`.

Additionally, vote extensions or the validation thereof (via `ExtendVote`
or `VerifyVoteExtension`) must *never* have side effects on the current state.
-They can only be used when their data is provided in a `RequestPrepareProposal` call.
+They can only be used when their data is provided in a `RequestPrepareProposal` call but, again,
+without side effects on the app state.

If there is some non-determinism in the state machine, consensus will eventually
fail as nodes disagree over the correct values for the block header. The
@@ -266,11 +277,12 @@ Sources of non-determinism in applications may include:

See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion.

-Note that some methods (`Query`, `FinalizeBlock`) return non-deterministic data in the form
-of `Info` and `Log` fields. The `Log` is intended for the literal output from the Application's
-logger, while the `Info` is any additional info that should be returned. These are the only fields
-that are not included in block header computations, so we don't need agreement
-on them. All other fields in the `Response*` must be strictly deterministic.
+Note that some methods (e.g., `Query` and `FinalizeBlock`) may return
+non-deterministic data in the form of `Info`, `Log` and/or `Events` fields. The
+`Log` is intended for the literal output from the Application's logger, while
+the `Info` is any additional info that should be returned. These fields are not
+included in block header computations, so we don't need agreement on them. See
+each field's description on whether it must be deterministic or not.

## Events

@@ -278,7 +290,7 @@
Method `FinalizeBlock` includes an `events` field at the top level in its `Response*`, and one `events` field per transaction included in the block. -Applications may respond to this ABCI++ method with an event list for each executed +Applications may respond to this ABCI 2.0 method with an event list for each executed transaction, and a general event list for the block itself. Events allow applications to associate metadata with transactions and blocks. Events returned via `FinalizeBlock` do not impact the consensus algorithm in any way @@ -302,9 +314,11 @@ message Event { } ``` -The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The -index flag notifies the CometBFT indexer to index the attribute. The value of -the `index` flag is non-deterministic and may vary across different nodes in the network. +The attributes of an `Event` consist of a `key`, a `value`, and an `index` +flag. The index flag notifies the CometBFT indexer to index the attribute. + +The `type` and `attributes` fields are non-deterministic and may vary across +different nodes in the network. ```protobuf message EventAttribute { diff --git a/spec/abci/abci++_client_server.md b/spec/abci/abci++_client_server.md index b6b11a18bb9..2a5fb5414d5 100644 --- a/spec/abci/abci++_client_server.md +++ b/spec/abci/abci++_client_server.md @@ -17,7 +17,7 @@ You are expected to have read all previous sections of ABCI++ specification, nam ## Message Protocol and Synchrony The message protocol consists of pairs of requests and responses defined in the -[protobuf file](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto). +[protobuf file](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto). Some messages have no fields, while others may include byte-arrays, strings, integers, or custom protobuf types. diff --git a/spec/abci/abci++_comet_expected_behavior.md b/spec/abci/abci++_comet_expected_behavior.md index b330588aa41..092d49e0863 100644 --- a/spec/abci/abci++_comet_expected_behavior.md +++ b/spec/abci/abci++_comet_expected_behavior.md @@ -39,7 +39,7 @@ Application design should consider _any_ of these possible sequences. The following grammar, written in case-sensitive Augmented Backus–Naur form (ABNF, specified in [IETF rfc7405](https://datatracker.ietf.org/doc/html/rfc7405)), specifies all possible -sequences of calls to ABCI++, taken by a correct process, across all heights from the genesis block, +sequences of calls to ABCI++, taken by a **correct process**, across all heights from the genesis block, including recovery runs, from the point of view of the Application. ```abnf @@ -56,7 +56,7 @@ consensus-exec = (inf)consensus-height consensus-height = *consensus-round decide commit consensus-round = proposer / non-proposer -proposer = *got-vote [prepare-proposal process-proposal] [extend] +proposer = *got-vote [prepare-proposal [process-proposal]] [extend] extend = *got-vote extend-vote *got-vote non-proposer = *got-vote [process-proposal] [extend] @@ -118,10 +118,10 @@ Let us now examine the grammar line by line, providing further details. to provide the Application with all the snapshots needed, in order to reconstruct the state locally. A successful attempt must provide at least one chunk via `ApplySnapshotChunk`. At the end of a successful attempt, CometBFT calls `Info` to make sure the reconstructed state's - _AppHash_ matches the one in the block header at the corresponding height. 
Note that the state - of the application does not contain vote extensions itself. The application can rely on - [CometBFT to ensure](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history) - the node has all the relevant data to proceed with the execution beyond this point. + _AppHash_ matches the one in the block header at the corresponding height. Note that the state + of the application does not contain vote extensions itself. The application can rely on + [CometBFT to ensure](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history) + the node has all the relevant data to proceed with the execution beyond this point. >```abnf >state-sync = *state-sync-attempt success-sync info @@ -130,7 +130,8 @@ Let us now examine the grammar line by line, providing further details. >``` * In recovery mode, CometBFT first calls `Info` to know from which height it needs to replay decisions - to the Application. After this, CometBFT enters normal consensus execution. + to the Application. After this, CometBFT enters consensus execution, first in replay mode and then + in normal mode. >```abnf >recovery = info consensus-exec @@ -149,15 +150,27 @@ Let us now examine the grammar line by line, providing further details. `FinalizeBlock`, followed by a call to `Commit`. In each round, the sequence of method calls depends on whether the local process is the proposer or not. Note that, if a height contains zero rounds, this means the process is replaying an already decided value (catch-up mode). + When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees + that at least one non-byzantine validator has run `ProcessProposal` on that block. + >```abnf >consensus-height = *consensus-round decide commit >consensus-round = proposer / non-proposer >``` -* For every round, if the local process is the proposer of the current round, CometBFT calls `PrepareProposal`, followed by `ProcessProposal`. -These two always come together because they reflect the same proposal that the process -also delivers to itself. +* For every round, if the local process is the proposer of the current round, CometBFT calls `PrepareProposal`. + A successful execution of `PrepareProposal` results in a proposal block being (i) signed and (ii) stored + (e.g., in stable storage). + + A crash during this step will direct how the node proceeds the next time it is executed, for the same round, after restarted. + If it crashed before (i), then, during the recovery, `PrepareProposal` will execute as if for the first time. + Following a crash between (i) and (ii) and in (the likely) case `PrepareProposal` produces a different block, + the signing of this block will fail, which means that the new block will not be stored or broadcast. + If the crash happened after (ii), then signing fails but nothing happens to the stored block. + + If a block was stored, it is sent to all validators, including the proposer. + Receiving a proposal block triggers `ProcessProposal` with such a block. Then, optionally, the Application is asked to extend its vote for that round. Calls to `VerifyVoteExtension` can come at any time: the @@ -165,12 +178,15 @@ also delivers to itself. of this height. 
>```abnf
->proposer = *got-vote [prepare-proposal process-proposal] [extend]
+>proposer = *got-vote [prepare-proposal [process-proposal]] [extend]
>extend = *got-vote extend-vote *got-vote
>```

* Also for every round, if the local process is _not_ the proposer of the current round, CometBFT
-  will call `ProcessProposal` at most once. At most one call to `ExtendVote` may occur only after
+  will call `ProcessProposal` at most once.
+  Under certain conditions, CometBFT may not call `ProcessProposal` in a round;
+  see [this section](./abci++_example_scenarios.md#scenario-3) for an example.
+  At most one call to `ExtendVote` may occur only after
  `ProcessProposal` is called. A number of calls to `VerifyVoteExtension` can occur in any order
  with respect to `ProcessProposal` and `ExtendVote` throughout the round. The reasons are the
  same as above, namely, the process running slightly late in the current round, or votes from future
@@ -211,7 +227,7 @@ As for the new methods:

* `PrepareProposal` must create a list of [transactions](./abci++_methods.md#prepareproposal)
  by copying over the transaction list passed in `RequestPrepareProposal.txs`, in the same order.
-
+
  The Application must check whether the size of all transactions exceeds the byte limit
  (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the end
  of the list until the total byte size is at or below the limit.
@@ -228,41 +244,39 @@ Finally, `Commit`, which is kept in ABCI++, no longer returns the `AppHash`. It
`FinalizeBlock` to do so. Thus, a slight refactoring of the old `Commit` implementation will be needed
to move the return of `AppHash` to `FinalizeBlock`.

-## Accomodating for vote extensions
+## Accommodating for vote extensions

In a manner transparent to the application, CometBFT ensures the node is provided with all
-the data it needs to participate in consensus.
+the data it needs to participate in consensus.

In the case of recovering from a crash, or joining the network via state sync, CometBFT will make
-sure the node acquires the necessary vote extensions before switching to consensus.
+sure the node acquires the necessary vote extensions before switching to consensus.

-If a node is already in consensus but falls behind, during catch-up, CometBFT will provide the node with
+If a node is already in consensus but falls behind, during catch-up, CometBFT will provide the node with
vote extensions from past heights by retrieving the extensions within `ExtendedCommit` for old heights that it had previously stored.

-We realize this is sub-optimal due to the increase in storage needed to store the extensions, we are
+We realize this is sub-optimal due to the increase in storage needed to store the extensions; we are
working on an optimization of this implementation which should alleviate this concern.
However, the application can use the existing `retain_height` parameter to decide how much history it wants
to keep, just as is done with the block history. The network-wide implications of the usage of `retain_height`
stay the same.
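As an illustration of that knob, here is a hedged Go sketch of a `Commit` handler reporting a retain height to CometBFT. The response type is a stand-in for the ABCI message, and the pruning policy shown is purely illustrative:

```go
package app

// Stand-in for the ABCI Commit response carrying the pruning hint.
type ResponseCommit struct {
	RetainHeight int64 // heights below this value may be pruned; 0 retains everything
}

type PruningApp struct {
	height     int64 // last committed height
	keepRecent int64 // how much history the application wants to keep
}

// Commit persists the application state (elided here) and tells CometBFT the
// lowest height it still needs, bounding the storage used for blocks and,
// since extensions are stored alongside commits, for vote extensions as well.
func (app *PruningApp) Commit() ResponseCommit {
	retain := app.height - app.keepRecent
	if retain < 0 {
		retain = 0 // retain everything until enough history has accumulated
	}
	return ResponseCommit{RetainHeight: retain}
}
```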
-The decision to store
-historical commits and potential optimizations, are discussed in detail in [RFC-100](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#current-limitations-and-possible-implementations)
+The decision to store
+historical commits and potential optimizations are discussed in detail in [RFC-100](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/rfc/rfc-100-abci-vote-extension-propag.md#current-limitations-and-possible-implementations)

-## Handling upgrades to ABCI 2.0
+## Handling upgrades to ABCI 2.0

-If applications upgrade to ABCI 2.0, CometBFT internally ensures that the [application setup](./abci%2B%2B_app_requirements.md#application-configuration-required-to-switch-to-abci-20) is reflected in its operation.
-CometBFT retrieves from the application configuration the value of `VoteExtensionsEnableHeight`( *h<sub>e</sub>*,),
-the height at which vote extensions are required for consensus to proceed, and uses it to determine the data it stores and data it sends to a peer
-that is catching up.
+If applications upgrade to ABCI 2.0, CometBFT internally ensures that the [application setup](./abci%2B%2B_app_requirements.md#application-configuration-required-to-switch-to-abci-20) is reflected in its operation.
+CometBFT retrieves from the application configuration the value of `VoteExtensionsEnableHeight` (_h<sub>e</sub>_),
+the height at which vote extensions are required for consensus to proceed, and uses it to determine the data it stores and the data it sends to a peer that is catching up.

-Namely, upon saving the block for a given height *h* in the block store at decision time
-- if *h ≥ h<sub>e</sub>*, the corresponding extended commit that was used to decide locally is saved as well
-- if *h < h<sub>e</sub>*, there are no changes to the data saved
+Namely, upon saving the block for a given height _h_ in the block store at decision time:

-In the catch-up mechanism, when a node *f* realizes that another peer is at height *h<sub>p</sub>*, which is more than 2 heights behind,
-- if *h<sub>p</sub> ≥ h<sub>e</sub>*, *f* uses the extended commit to
-  reconstruct the precommit votes with their corresponding extensions
-- if *h<sub>p</sub> < h<sub>e</sub>*, *f* uses the canonical commit to reconstruct the precommit votes,
-  as done for ABCI 1.0 and earlier.
-
+* if _h ≥ h<sub>e</sub>_, the corresponding extended commit that was used to decide locally is saved as well
+* if _h < h<sub>e</sub>_, there are no changes to the data saved
+
+In the catch-up mechanism, when a node _f_ realizes that another peer is at height _h<sub>p</sub>_, which is more than 2 heights behind height _h<sub>f</sub>_,
+
+* if _h<sub>p</sub> ≥ h<sub>e</sub>_, _f_ uses the extended commit to
+  reconstruct the precommit votes with their corresponding extensions
+* if _h<sub>p</sub> < h<sub>e</sub>_, _f_ uses the canonical commit to reconstruct the precommit votes,
+  as done for ABCI 1.0 and earlier.

diff --git a/spec/abci/abci++_example_scenarios.md b/spec/abci/abci++_example_scenarios.md
index 93fb6a7a6f7..2af4665ceb1 100644
--- a/spec/abci/abci++_example_scenarios.md
+++ b/spec/abci/abci++_example_scenarios.md
@@ -4,13 +4,13 @@ title: ABCI++ extra
---

# Introduction

-In the section [CometBFT's expected behaviour](./abci++_comet_expected_behavior.md#valid-method-call-sequences),
-we presented the most common behaviour, usually referred to as the good case.
-However, the grammar specified in the same section is more general and covers more scenarios
-that an Application designer needs to account for.
+In the section [CometBFT's expected behaviour](./abci++_comet_expected_behavior.md#valid-method-call-sequences),
+we presented the most common behaviour, usually referred to as the good case.
+However, the grammar specified in the same section is more general and covers more scenarios
+that an Application designer needs to account for.

-In this section, we give more information about these possible scenarios. We focus on methods
-introduced by ABCI++: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate
+In this section, we give more information about these possible scenarios. We focus on methods
+introduced by ABCI++: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate
on the part of the grammar presented below.

```abnf
proposer = [prepare-proposal process-proposal]
non-proposer = [process-proposal]
```

-We can see from the grammar that we can have several rounds before deciding a block. The reasons
+We can see from the grammar that we can have several rounds before deciding a block. The reasons
why one round may not be enough are:
+
* network asynchrony, and
-* a Byzantine process being the proposer.
+* a Byzantine process being the proposer.

-If we assume that the consensus algorithm decides on block $X$ in round $r$, in the rounds
+If we assume that the consensus algorithm decides on block $X$ in round $r$, in the rounds
$r' <= r$, CometBFT can exhibit any of the following behaviours:

-1. Call `PrepareProposal` and/or `ProcessProposal` for block $X$.
+1. Call `PrepareProposal` and/or `ProcessProposal` for block $X$.
1. Call `PrepareProposal` and/or `ProcessProposal` for block $Y \neq X$.
1. Does not call `PrepareProposal` and/or `ProcessProposal`.

-In the rounds when it is the proposer, CometBFT's `PrepareProposal` call is always followed by the
-`ProcessProposal` call. The reason is that the process always delivers the proposal to itself, which
-triggers the `ProcessProposal` call.
+In the rounds in which the process is the proposer, CometBFT's `PrepareProposal` call is always followed by the
+`ProcessProposal` call. The reason is that the process also broadcasts the proposal to itself, which is locally delivered and triggers the `ProcessProposal` call.
+The proposal processed by `ProcessProposal` is the same as the one returned by a preceding `PrepareProposal` invocation for the same height and round.
+While in the absence of restarts there is only one such preceding invocation, if the proposer restarts there could have been one extra invocation to `PrepareProposal` for each restart.

-As the number of rounds the consensus algorithm needs to decide in a given run is a priori unknown, the
-application needs to account for any number of rounds, where each round can exhibit any of these three
-behaviours. Recall that the application is unaware of the internals of consensus and thus of the rounds.
+As the number of rounds the consensus algorithm needs to decide in a given run is a priori unknown, the
+application needs to account for any number of rounds, where each round can exhibit any of these three
+behaviours. Recall that the application is unaware of the internals of consensus and thus of the rounds.

# Possible scenarios
+
-The unknown number of rounds we can have when following the consensus algorithm yields a vast number of
-scenarios we can expect. Listing them all is unfeasible. However, here we give several of them and draw the
+The unknown number of rounds we can have when following the consensus algorithm yields a vast number of
+scenarios we can expect. Listing them all is unfeasible. However, here we give several of them and draw the
main conclusions. Specifically, we will show that before block $X$ is decided:
-
-1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)).
+
+1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)).
1. On a correct node, `ProcessProposal` may be called multiple times and for different blocks ([**Scenario 2**](#scenario-2)).
1. On a correct node, `PrepareProposal` and `ProcessProposal` for block $X$ may not be called ([**Scenario 3**](#scenario-3)).
1. On a correct node, `PrepareProposal` and `ProcessProposal` may not be called at all ([**Scenario 4**](#scenario-4)).

## Basic information

-Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in
-each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in
-practice the consensus algorithm works with respect to voting power of the validators, in this document
-we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below:
+Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in
+each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in
+practice the consensus algorithm works with respect to the voting power of the validators, in this document
+we refer to the number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below:

-### Round X:
+### Round X

1. **Propose:** Describes what happens while $step_p = propose$.
1. **Prevote:** Describes what happens while $step_p = prevote$.
@@ -69,93 +72,93 @@ we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The l

$p$ calls `ProcessProposal` many times with different values.
However, here we give several of them and draw the + +The unknown number of rounds we can have when following the consensus algorithm yields a vast number of +scenarios we can expect. Listing them all is unfeasible. However, here we give several of them and draw the main conclusions. Specifically, we will show that before block $X$ is decided: - -1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)). + +1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)). 1. On a correct node, `ProcessProposal` may be called multiple times and for different blocks ([**Scenario 2**](#scenario-2)). 1. On a correct node, `PrepareProposal` and `ProcessProposal` for block $X$ may not be called ([**Scenario 3**](#scenario-3)). 1. On a correct node, `PrepareProposal` and `ProcessProposal` may not be called at all ([**Scenario 4**](#scenario-4)). @@ -54,12 +57,12 @@ main conclusions. Specifically, we will show that before block $X$ is decided: ## Basic information -Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in -each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in -practice the consensus algorithm works with respect to voting power of the validators, in this document -we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below: +Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in +each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in +practice the consensus algorithm works with respect to voting power of the validators, in this document +we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below: -### Round X: +### Round X 1. **Propose:** Describes what happens while $step_p = propose$. 1. **Prevote:** Describes what happens while $step_p = prevote$. @@ -69,93 +72,93 @@ we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The l $p$ calls `ProcessProposal` many times with different values. -### Round 0: - -1. **Propose:** The proposer of this round is a Byzantine process, and it chooses not to send the proposal -message. Therefore, $p$'s $timeoutPropose$ expires, it sends $Prevote$ for $nil$, and it does not call -`ProcessProposal`. All correct processes do the same. -1. **Prevote:** $p$ eventually receives $2f+1$ $Prevote$ messages for $nil$ and starts $timeoutPrevote$. -When $timeoutPrevote$ expires it sends $Precommit$ for $nil$. -1. **Precommit:** $p$ eventually receives $2f+1$ $Precommit$ messages for $nil$ and starts $timeoutPrecommit$. -When it expires, it moves to the next round. - -### Round 1: - -1. **Propose:** A correct process is the proposer in this round. Its $validValue$ is $nil$, and it is free -to generate and propose a new block $Y$. Process $p$ receives this proposal in time, calls `ProcessProposal` -for block $Y$, and broadcasts a $Prevote$ message for it. -1. **Prevote:** Due to network asynchrony less than $2f+1$ processes send $Prevote$ for this block. -Therefore, $p$ does not update $validValue$ in this round. -1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this -block and send $Precommit$ message. As a consequence, $p$ does not decide on $Y$. - -### Round 2: - -1. 
**Propose:** Same as in [**Round 1**](#round-1), just another correct process is the proposer, and it -proposes another value $Z$. Process $p$ receives the proposal on time, calls `ProcessProposal` for new block -$Z$, and broadcasts a $Prevote$ message for it. +### Round 0 + +1. **Propose:** The proposer of this round is a Byzantine process, and it chooses not to send the proposal +message. Therefore, $p$'s $timeoutPropose$ expires, it sends $Prevote$ for $nil$, and it does not call +`ProcessProposal`. All correct processes do the same. +1. **Prevote:** $p$ eventually receives $2f+1$ $Prevote$ messages for $nil$ and starts $timeoutPrevote$. +When $timeoutPrevote$ expires it sends $Precommit$ for $nil$. +1. **Precommit:** $p$ eventually receives $2f+1$ $Precommit$ messages for $nil$ and starts $timeoutPrecommit$. +When it expires, it moves to the next round. + +### Round 1 + +1. **Propose:** A correct process is the proposer in this round. Its $validValue$ is $nil$, and it is free +to generate and propose a new block $Y$. Process $p$ receives this proposal in time, calls `ProcessProposal` +for block $Y$, and broadcasts a $Prevote$ message for it. +1. **Prevote:** Due to network asynchrony less than $2f+1$ processes send $Prevote$ for this block. +Therefore, $p$ does not update $validValue$ in this round. +1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this +block and send $Precommit$ message. As a consequence, $p$ does not decide on $Y$. + +### Round 2 + +1. **Propose:** Same as in [**Round 1**](#round-1), just another correct process is the proposer, and it +proposes another value $Z$. Process $p$ receives the proposal on time, calls `ProcessProposal` for new block +$Z$, and broadcasts a $Prevote$ message for it. 1. **Prevote:** Same as in [**Round 1**](#round-1). 1. **Precommit:** Same as in [**Round 1**](#round-1). -Rounds like these can continue until we have a round in which process $p$ updates its $validValue$ or until -we reach round $r$ where process $p$ decides on a block. After that, it will not call `ProcessProposal` -anymore for this height. +Rounds like these can continue until we have a round in which process $p$ updates its $validValue$ or until +we reach round $r$ where process $p$ decides on a block. After that, it will not call `ProcessProposal` +anymore for this height. -## Scenario 2 +## Scenario 2 $p$ calls `PrepareProposal` many times with different values. -### Round 0: +### Round 0 -1. **Propose:** Process $p$ is the proposer in this round. Its $validValue$ is $nil$, and it is free to -generate and propose new block $Y$. Before proposing, it calls `PrepareProposal` for $Y$. After that, it -broadcasts the proposal, delivers it to itself, calls `ProcessProposal` and broadcasts $Prevote$ for it. -1. **Prevote:** Due to network asynchrony less than $2f+1$ processes receive the proposal on time and send -$Prevote$ for it. Therefore, $p$ does not update $validValue$ in this round. -1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this -block and send non-$nil$ $Precommit$ message. As a consequence, $p$ does not decide on $Y$. +1. **Propose:** Process $p$ is the proposer in this round. Its $validValue$ is $nil$, and it is free to +generate and propose new block $Y$. Before proposing, it calls `PrepareProposal` for $Y$. After that, it +broadcasts the proposal, delivers it to itself, calls `ProcessProposal` and broadcasts $Prevote$ for it. +1. 
**Prevote:** Due to network asynchrony less than $2f+1$ processes receive the proposal on time and send +$Prevote$ for it. Therefore, $p$ does not update $validValue$ in this round. +1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this +block and send non-$nil$ $Precommit$ message. As a consequence, $p$ does not decide on $Y$. -After this round, we can have multiple rounds like those in [Scenario 1](#scenario-1). The important thing -is that process $p$ should not update its $validValue$. Consequently, when process $p$ reaches the round -when it is again the proposer, it will ask the mempool for the new block again, and the mempool may return a -different block $Z$, and we can have the same round as [Round 0](#round-0-1) just for a different block. As -a result, process $p$ calls `PrepareProposal` again but for a different value. When it reaches round $r$ -some process will propose block $X$ and if $p$ receives $2f+1$ $Precommit$ messages, it will decide on this -value. +After this round, we can have multiple rounds like those in [Scenario 1](#scenario-1). The important thing +is that process $p$ should not update its $validValue$. Consequently, when process $p$ reaches the round +when it is again the proposer, it will ask the mempool for the new block again, and the mempool may return a +different block $Z$, and we can have the same round as [Round 0](#round-0-1) just for a different block. As +a result, process $p$ calls `PrepareProposal` again but for a different value. When it reaches round $r$ +some process will propose block $X$ and if $p$ receives $2f+1$ $Precommit$ messages, it will decide on this +value. -## Scenario 3 +## Scenario 3 -$p$ calls `PrepareProposal` and `ProcessProposal` for many values, but decides on a value for which it did +$p$ calls `PrepareProposal` and `ProcessProposal` for many values, but decides on a value for which it did not call `PrepareProposal` or `ProcessProposal`. -In this scenario, in all rounds before $r$ we can have any round presented in [Scenario 1](#scenario-1) or +In this scenario, in all rounds before $r$ we can have any round presented in [Scenario 1](#scenario-1) or [Scenario 2](#scenario-2). What is important is that: -- no proposer proposed block $X$ or if it did, process $p$, due to asynchrony, did not receive it in time, + +* no proposer proposed block $X$ or if it did, process $p$, due to asynchrony, did not receive it in time, so it did not call `ProcessProposal`, and -- if $p$ was the proposer it proposed some other value $\neq X$. +* if $p$ was the proposer it proposed some other value $\neq X$. -### Round $r$: +### Round $r$ -1. **Propose:** A correct process is the proposer in this round, and it proposes block $X$. +1. **Propose:** A correct process is the proposer in this round, and it proposes block $X$. Due to asynchrony, the proposal message arrives to process $p$ after its $timeoutPropose$ expires and it sends $Prevote$ for $nil$. Consequently, process $p$ does not call `ProcessProposal` for block $X$. However, the same proposal arrives at other processes before their $timeoutPropose$ expires, and they send $Prevote$ for this proposal. -1. **Prevote:** Process $p$ receives $2f+1$ $Prevote$ messages for proposal $X$, updates correspondingly its -$validValue$ and $lockedValue$ and sends $Precommit$ message. All correct processes do the same. -1. **Precommit:** Finally, process $p$ receives $2f+1$ $Precommit$ messages, and decides on block $X$. +1. 
**Prevote:** Process $p$ receives $2f+1$ $Prevote$ messages for proposal $X$, updates correspondingly its +$validValue$ and $lockedValue$ and sends $Precommit$ message. All correct processes do the same. +1. **Precommit:** Finally, process $p$ receives $2f+1$ $Precommit$ messages, and decides on block $X$. ## Scenario 4 -[Scenario 3](#scenario-3) can be translated into a scenario where $p$ does not call `PrepareProposal` and -`ProcessProposal` at all. For this, it is necessary that process $p$ is not the proposer in any of the -rounds $0 <= r' <= r$ and that due to network asynchrony or Byzantine proposer, it does not receive the -proposal before $timeoutPropose$ expires. As a result, it will enter round $r$ without calling -`PrepareProposal` and `ProcessProposal` before it, and as shown in Round $r$ of [Scenario 3](#scenario-3) it +[Scenario 3](#scenario-3) can be translated into a scenario where $p$ does not call `PrepareProposal` and +`ProcessProposal` at all. For this, it is necessary that process $p$ is not the proposer in any of the +rounds $0 <= r' <= r$ and that due to network asynchrony or Byzantine proposer, it does not receive the +proposal before $timeoutPropose$ expires. As a result, it will enter round $r$ without calling +`PrepareProposal` and `ProcessProposal` before it, and as shown in Round $r$ of [Scenario 3](#scenario-3) it will decide in this round. Again without calling any of these two calls. - diff --git a/spec/abci/abci++_methods.md b/spec/abci/abci++_methods.md index a50f04653c0..d9d6eb9ea08 100644 --- a/spec/abci/abci++_methods.md +++ b/spec/abci/abci++_methods.md @@ -14,7 +14,7 @@ title: Methods * **Response**: * `Message (string)`: The input string * **Usage**: - * Echo a string to test an abci client/server implementation + * Echo a string to test an ABCI client/server implementation ### Flush @@ -29,22 +29,22 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |---------------|--------|------------------------------------------|--------------| + | Name | Type | Description | Field Number | + |---------------|--------|----------------------------------------|--------------| | version | string | The CometBFT software semantic version | 1 | - | block_version | uint64 | The CometBFT Block Protocol version | 2 | - | p2p_version | uint64 | The CometBFT P2P Protocol version | 3 | + | block_version | uint64 | The CometBFT Block version | 2 | + | p2p_version | uint64 | The CometBFT P2P version | 3 | | abci_version | string | The CometBFT ABCI semantic version | 4 | * **Response**: - | Name | Type | Description | Field Number | - |---------------------|--------|-----------------------------------------------------|--------------| - | data | string | Some arbitrary information | 1 | - | version | string | The application software semantic version | 2 | - | app_version | uint64 | The application protocol version | 3 | - | last_block_height | int64 | Latest height for which the app persisted its state | 4 | - | last_block_app_hash | bytes | Latest AppHash returned by `Commit` | 5 | + | Name | Type | Description | Field Number | Deterministic | + |---------------------|--------|-----------------------------------------------------|--------------|---------------| + | data | string | Some arbitrary information | 1 | N/A | + | version | string | The application software semantic version | 2 | N/A | + | app_version | uint64 | The application version | 3 | N/A | + | last_block_height | int64 | Latest height for which the app persisted its state | 4 
| N/A | + | last_block_app_hash | bytes | Latest AppHash returned by `FinalizeBlock` | 5 | N/A | * **Usage**: * Return information about the application state. @@ -71,11 +71,11 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |------------------|----------------------------------------------|--------------------------------------------------|--------------| - | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | - | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | - | app_hash | bytes | Initial application hash. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |------------------|----------------------------------------------|--------------------------------------------------|--------------|---------------| + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | Yes | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | Yes | + | app_hash | bytes | Initial application hash. | 3 | Yes | * **Usage**: * Called once upon genesis. @@ -93,26 +93,26 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | data | bytes | Raw query bytes. Can be used with or in lieu of Path. | 1 | - | path | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...` | 2 | - | height | int64 | The block height for which you want the query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 3 | - | prove | bool | Return Merkle proof with response if possible | 4 | + | Name | Type | Description | Field Number | + |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | data | bytes | Request parameters for the application to interpret analogously to a [URI query component](https://www.rfc-editor.org/rfc/rfc3986#section-3.4). Can be used with or in lieu of `path`. | 1 | + | path | string | A request path for the application to interpret analogously to a [URI path component](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) in e.g. routing. Can be used with or in lieu of `data`. Applications MUST interpret "/store" or any path starting with "/store/" as a query by key on the underlying store, in which case a key SHOULD be specified in `data`. 
Applications SHOULD allow queries over specific types like `/accounts/...` or `/votes/...`. | 2 | + | height | int64 | The block height against which to query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1. | 3 | + | prove | bool | Return Merkle proof with response if possible. | 4 | * **Response**: - | Name | Type | Description | Field Number | - |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | code | uint32 | Response code. | 1 | - | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | - | info | string | Additional information. **May be non-deterministic.** | 4 | - | index | int64 | The index of the key in the tree. | 5 | - | key | bytes | The key of the matching data. | 6 | - | value | bytes | The value of the matching data. | 7 | - | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | - | height | int64 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | - | codespace | string | Namespace for the `code`. | 10 | + | Name | Type | Description | Field Number | Deterministic | + |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | code | uint32 | Response code. | 1 | N/A | + | log | string | The output of the application's logger. | 3 | N/A | + | info | string | Additional information. | 4 | N/A | + | index | int64 | The index of the key in the tree. | 5 | N/A | + | key | bytes | The key of the matching data. | 6 | N/A | + | value | bytes | The value of the matching data. | 7 | N/A | + | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | N/A | + | height | int64 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | N/A | + | codespace | string | Namespace for the `code`. | 10 | N/A | * **Usage**: * Query for data from the application at current or past height. @@ -124,21 +124,23 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | tx | bytes | The request transaction bytes | 1 | - | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the tranasaction is required. 
`CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction. | 2 |
+  | Name | Type        | Description                                                                                                                                                                                                                             | Field Number |
+  |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+  | tx   | bytes       | The request transaction bytes                                                                                                                                                                                                           | 1            |
+  | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the transaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction.  | 2            |

* **Response**:

-  | Name       | Type   | Description                                        | Field Number |
-  |------------|--------|----------------------------------------------------|--------------|
-  | code       | uint32 | Response code.                                     | 1            |
-  | data       | bytes  | Result bytes, if any.                              | 2            |
-  | gas_wanted | int64  | Amount of gas requested for transaction.           | 5            |
-  | codespace  | string | Namespace for the `code`.                          | 8            |
-  | sender     | string | The transaction's sender (e.g. the signer)         | 9            |
-  | priority   | int64  | The transaction's priority (for mempool ordering)  | 10           |
+  | Name       | Type                                               | Description                                                           | Field Number | Deterministic |
+  |------------|----------------------------------------------------|------------------------------------------------------------------------|--------------|---------------|
+  | code       | uint32                                             | Response code.                                                        | 1            | N/A           |
+  | data       | bytes                                              | Result bytes, if any.                                                 | 2            | N/A           |
+  | log        | string                                             | The output of the application's logger.                               | 3            | N/A           |
+  | info       | string                                             | Additional information.                                               | 4            | N/A           |
+  | gas_wanted | int64                                              | Amount of gas requested for transaction.                              | 5            | N/A           |
+  | gas_used   | int64                                              | Amount of gas consumed by transaction.                                | 6            | N/A           |
+  | events     | repeated [Event](abci++_basic_concepts.md#events)  | Type & Key-Value events for indexing transactions (e.g. by account).  | 7            | N/A           |
+  | codespace  | string                                             | Namespace for the `code`.                                             | 8            | N/A           |

* **Usage**:

@@ -159,16 +161,13 @@ title: Methods

* **Request**:

-  | Name   | Type  | Description                        | Field Number |
-  |--------|-------|------------------------------------|--------------|
-
Commit signals the application to persist application state. It takes no parameters.

* **Response**:

-  | Name          | Type  | Description                                                              | Field Number |
-  |---------------|-------|--------------------------------------------------------------------------|--------------|
-  | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all).   | 3            |
+  | Name          | Type  | Description                                                              | Field Number | Deterministic |
+  |---------------|-------|--------------------------------------------------------------------------|--------------|---------------|
+  | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all).   | 3            | No            |

* **Usage**:

@@ -183,16 +182,13 @@ title: Methods

* **Request**:

-  | Name   | Type  | Description                        | Field Number |
-  |--------|-------|------------------------------------|--------------|
-
Empty request asking the application for a list of snapshots.

* **Response**:

-  | Name      | Type                           | Description                    | Field Number |
-  |-----------|--------------------------------|--------------------------------|--------------|
-  | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. 
| 1 | + | Name | Type | Description | Field Number | Deterministic | + |-----------|--------------------------------|--------------------------------|--------------|---------------| + | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1 | N/A | * **Usage**: * Used during state sync to discover available snapshots on peers. @@ -210,9 +206,9 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |-------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |-------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | N/A | * **Usage**: * Used during state sync to retrieve snapshot chunks from peers. @@ -228,9 +224,9 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |--------|-------------------|-----------------------------------|--------------| - | result | [Result](#result) | The result of the snapshot offer. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |--------|-------------------|-----------------------------------|--------------|---------------| + | result | [Result](#result) | The result of the snapshot offer. | 1 | N/A | #### Result @@ -255,25 +251,25 @@ title: Methods can be spoofed by adversaries, so applications should employ additional verification schemes to avoid denial-of-service attacks. The verified `AppHash` is automatically checked against the restored application at the end of snapshot restoration. - * For more information, see the `Snapshot` data type or the [state sync section](../p2p/messages/state-sync.md). + * For more information, see the `Snapshot` data type or the [state sync section](../p2p/legacy-docs/messages/state-sync.md). ### ApplySnapshotChunk * **Request**: - | Name | Type | Description | Field Number | - |--------|--------|-----------------------------------------------------------------------------|--------------| + | Name | Type | Description | Field Number | + |--------|--------|---------------------------------------------------------------------------|--------------| | index | uint32 | The chunk index, starting from `0`. CometBFT applies chunks sequentially. | 1 | - | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | - | sender | string | The P2P ID of the node who sent this chunk. | 3 | + | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | + | sender | string | The P2P ID of the node who sent this chunk. 
| 3 | * **Response**: - | Name | Type | Description | Field Number | - |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | result | Result (see below) | The result of applying this chunk. | 1 | - | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | - | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | result | Result (see below) | The result of applying this chunk. | 1 | N/A | + | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | N/A | + | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | N/A | ```proto enum Result { @@ -299,7 +295,7 @@ title: Methods peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`. The application should be prepared to reset and accept it or abort as appropriate. -## New methods introduced in ABCI++ +## New methods introduced in ABCI 2.0 ### PrepareProposal @@ -311,7 +307,7 @@ title: Methods |----------------------|-------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------| | max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 1 | | txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 2 | - | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from CometBFT's data structures. | 3 | + | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from CometBFT's data structures. | 3 | | misbehavior | repeated [Misbehavior](#misbehavior) | List of information about validators that misbehaved. | 4 | | height | int64 | The height of the block that will be proposed. | 5 | | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that that will be proposed. 
| 6 | @@ -320,16 +316,17 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |-------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------|--------------| - | txs | repeated bytes | Possibly modified list of transactions that have been picked as part of the proposed block. | 2 | + | Name | Type | Description | Field Number | Deterministic | + |------|----------------|---------------------------------------------------------------------------------------------|--------------|---------------| + | txs | repeated bytes | Possibly modified list of transactions that have been picked as part of the proposed block. | 2 | No | * **Usage**: * `RequestPrepareProposal`'s parameters `txs`, `misbehavior`, `height`, `time`, `next_validators_hash`, and `proposer_address` are the same as in `RequestProcessProposal` and `RequestFinalizeBlock`. - * `RequestPrepareProposal.local_last_commit` is a set of the precommit votes that allowed the - decision of the previous block, together with their corresponding vote extensions. + * `RequestPrepareProposal.local_last_commit` is a set of the precommit votes for the previous + height, including the ones that led to the decision of the previous block, + together with their corresponding vote extensions. * The `height`, `time`, and `proposer_address` values match the values from the header of the proposed block. * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that CometBFT @@ -355,12 +352,16 @@ title: Methods traceability, it is its responsibility's to support it. For instance, the Application could attach to a transformed transaction a list with the hashes of the transactions it derives from. - * CometBFT MAY include a list of transactions in `RequestPrepareProposal.txs` whose total - size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`. + * The Application MAY configure CometBFT to include a list of transactions in `RequestPrepareProposal.txs` + whose total size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`. + If the Application sets `ConsensusParams.Block.MaxBytes` to -1, CometBFT + will include _all_ transactions currently in the mempool in `RequestPrepareProposal.txs`, + which may not fit in `RequestPrepareProposal.max_tx_bytes`. Therefore, if the size of `RequestPrepareProposal.txs` is greater than `RequestPrepareProposal.max_tx_bytes`, the Application MUST remove transactions to ensure that the `RequestPrepareProposal.max_tx_bytes` limit is respected by those transactions - returned in `ResponsePrepareProposal.txs` . + returned in `ResponsePrepareProposal.txs`. + This is specified in [Requirement 2](./abci%2B%2B_app_requirements.md). * As a result of executing the prepared proposal, the Application may produce block events or transaction events. The Application must keep those events until a block is decided and then pass them on to CometBFT via `ResponseFinalizeBlock`. @@ -372,12 +373,11 @@ title: Methods --> * If CometBFT fails to validate the `ResponsePrepareProposal`, CometBFT will assume the Application is faulty and crash. - * The implementation of `PrepareProposal` can be non-deterministic. + * The implementation of `PrepareProposal` MAY be non-deterministic. #### When does CometBFT call "PrepareProposal" ? 
-
When a validator _p_ enters consensus round _r_, height _h_, in which _p_ is the proposer,
and _p_'s _validValue_ is `nil`:

@@ -390,8 +390,7 @@ and _p_'s _validValue_ is `nil`:
   returns from the call.
3. The Application uses the information received (transactions, commit info, misbehavior, time)
   to (potentially) modify the proposal.
-    * the Application MAY fully execute the block and produce a candidate state — immediate
-      execution
+    * the Application MAY fully execute the block and produce a candidate state (immediate execution)
    * the Application can manipulate transactions:
      * leave transactions untouched
      * add new transactions (not present initially) to the proposal
@@ -400,6 +399,9 @@ and _p_'s _validValue_ is `nil`:
      * modify transactions (e.g. aggregate them). As explained above, this compromises client traceability, unless it is implemented at the Application level.
      * reorder transactions - the Application reorders transactions in the list
+    * the Application MAY use the vote extensions in the commit info to modify the proposal, in which case it is suggested
+      that extensions be validated in the same manner as done in `VerifyVoteExtension`, since extensions of votes included
+      in the commit info after the minimum of +2/3 had been reached are not verified.
4. The Application includes the transaction list (whether modified or not) in the return parameters
   (see the rules in section _Usage_), and returns from the call.
5. _p_ uses the (possibly) modified block as _p_'s proposal in round _r_, height _h_.

@@ -426,9 +428,9 @@ the consensus algorithm will use it as proposal and will not call `RequestPrepar

* **Response**:

-  | Name                    | Type                              | Description                                                        | Field Number |
-  |-------------------------|-----------------------------------|---------------------------------------------------------------------|--------------|
-  | status                  | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid.  | 1            |
+  | Name   | Type                              | Description                                                        | Field Number | Deterministic |
+  |--------|-----------------------------------|---------------------------------------------------------------------|--------------|---------------|
+  | status | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid.   | 1            | Yes           |

* **Usage**:
  * Contains all information on the proposed block needed to fully execute it.
    `RequestFinalizeBlock`.
  * However, any resulting state changes must be kept as _candidate state_, and the Application
    should be ready to discard it in case another block is decided.
-  * `RequestProcessProposal` is also called at the proposer of a round. The reason for this is to
-    inform the Application of the block header's hash, which cannot be done at `PrepareProposal`
-    time. In this case, the call to `RequestProcessProposal` occurs right after the call to
-    `RequestPrepareProposal`.
+  * `RequestProcessProposal` is also called at the proposer of a round.
+    Normally the call to `RequestProcessProposal` occurs right after the call to `RequestPrepareProposal` and
+    `RequestProcessProposal` matches the block produced based on `ResponsePrepareProposal` (i.e.,
+    `RequestPrepareProposal.txs` equals `RequestProcessProposal.txs`). 
+    However, no such guarantee is made since, in the presence of failures, `RequestProcessProposal` may match
+    `ResponsePrepareProposal` from an earlier invocation or `ProcessProposal` may not be invoked at all.
  * The height and time values match the values from the header of the proposed block.
  * If `ResponseProcessProposal.status` is `REJECT`, consensus assumes the proposal received is not valid.
-  * The Application MAY fully execute the block — immediate execution
+  * The Application MAY fully execute the block (immediate execution)
  * The implementation of `ProcessProposal` MUST be deterministic. Moreover, the value of
    `ResponseProcessProposal.status` MUST **exclusively** depend on the parameters passed in
    the call to `RequestProcessProposal`, and the last committed Application state
@@ -484,23 +488,29 @@ When a node _p_ enters consensus round _r_, height _h_, in which _q_ is the prop

* **Request**:

-  | Name   | Type  | Description                                                                     | Field Number |
-  |--------|-------|----------------------------------------------------------------------------------|--------------|
-  | hash   | bytes | The header hash of the proposed block that the vote extension is to refer to.  | 1            |
-  | height | int64 | Height of the proposed block (for sanity check).                               | 2            |
+  | Name                 | Type                                            | Description                                                                                  | Field Number |
+  |----------------------|-------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------|
+  | hash                 | bytes                                           | The header hash of the proposed block that the vote extension is to refer to.               | 1            |
+  | height               | int64                                           | Height of the proposed block (for sanity check).                                            | 2            |
+  | time                 | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the proposed block (that the extension is to refer to).                        | 3            |
+  | txs                  | repeated bytes                                  | List of transactions of the block that the extension is to refer to.                        | 4            |
+  | proposed_last_commit | [CommitInfo](#commitinfo)                       | Info about the last proposed block's last commit.                                           | 5            |
+  | misbehavior          | repeated [Misbehavior](#misbehavior)            | List of information about validators that misbehaved contained in the proposed block.       | 6            |
+  | next_validators_hash | bytes                                           | Merkle root of the next validator set contained in the proposed block.                      | 7            |
+  | proposer_address     | bytes                                           | [Address](../core/data_structures.md#address) of the validator that created the proposal.   | 8            |

* **Response**:

-  | Name              | Type  | Description                                             | Field Number |
-  |-------------------|-------|----------------------------------------------------------|--------------|
-  | vote_extension    | bytes | Information signed by by CometBFT. Can have 0 length.   | 1            |
+  | Name           | Type  | Description                                          | Field Number | Deterministic |
+  |----------------|-------|--------------------------------------------------------|--------------|---------------|
+  | vote_extension | bytes | Information signed by CometBFT. Can have 0 length.   | 1            | No            |

* **Usage**:
  * `ResponseExtendVote.vote_extension` is application-generated information that will be signed
    by CometBFT and attached to the Precommit message.
  * The Application may choose to use an empty vote extension (0 length).
-  * `RequestExtendVote.hash` corresponds to the hash of a proposed block that was made available
-    to the Application in a previous call to `ProcessProposal` for the current height.
+  * The contents of `RequestExtendVote` correspond to the proposed block on which the consensus algorithm
+    will send the Precommit message. 
* `ResponseExtendVote.vote_extension` will only be attached to a non-`nil` Precommit message. If the consensus algorithm is to precommit `nil`, it will not call `RequestExtendVote`. * The Application logic that creates the extension can be non-deterministic. @@ -515,7 +525,7 @@ When a validator _p_ is in consensus state _prevote_ of round _r_, height _h_, i then _p_ locks _v_ and sends a Precommit message in the following way 1. _p_ sets _lockedValue_ and _validValue_ to _v_, and sets _lockedRound_ and _validRound_ to _r_ -2. _p_'s CometBFT calls `RequestExtendVote` with _id(v)_ (`RequestExtendVote.hash`). The call is synchronous. +2. _p_'s CometBFT calls `RequestExtendVote` with _v_ (`RequestExtendVote`). The call is synchronous. 3. The Application returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by the consensus algorithm. 4. _p_ sets `ResponseExtendVote.extension` as the value of the `extension` field of type [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension), @@ -542,13 +552,13 @@ a [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension) fi | hash | bytes | The hash of the proposed block that the vote extension refers to. | 1 | | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension. | 2 | | height | int64 | Height of the block (for sanity check). | 3 | - | vote_extension | bytes | Application-specific information signed by CometBFT. Can have 0 length. | 4 | + | vote_extension | bytes | Application-specific information signed by CometBFT. Can have 0 length. | 4 | * **Response**: - | Name | Type | Description | Field Number | - |--------|-------------------------------|----------------------------------------------------------------|--------------| - | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | + | Name | Type | Description | Field Number | Deterministic | + |--------|-------------------------------|----------------------------------------------------------------|--------------|---------------| + | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | Yes | * **Usage**: * `RequestVerifyVoteExtension.vote_extension` can be an empty byte array. The Application's @@ -584,6 +594,12 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): structure in calls to `RequestPrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. * `REJECT`, _p_ will deem the Precommit message invalid and discard it. +When a node _p_ is in consensus round _0_, height _h_, and _p_ receives a Precommit +message for CommitRound _r_, height _h-1_ from validator _q_ (_q_ ≠ _p_), _p_ +MAY add the Precommit message and associated extension to [ExtendedCommitInfo](#extendedcommitinfo) +without calling `RequestVerifyVoteExtension` to verify it. 
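
For illustration, the following minimal Go sketch shows a `VerifyVoteExtension` handler that follows the rules above: its accept/reject decision depends exclusively on the request contents. The package path and method signature assume the CometBFT v0.38-era Go ABCI interface (`github.com/cometbft/cometbft/abci/types`); the fixed-width extension encoding is an invented example, not part of the spec.

```go
package app

import (
	"context"
	"encoding/binary"

	abci "github.com/cometbft/cometbft/abci/types"
)

// Application embeds BaseApplication and overrides the vote extension hooks.
type Application struct {
	abci.BaseApplication
}

// VerifyVoteExtension accepts an extension iff it is well formed. The decision
// depends only on the request, never on local state such as clocks or peers.
func (app *Application) VerifyVoteExtension(
	_ context.Context, req *abci.RequestVerifyVoteExtension,
) (*abci.ResponseVerifyVoteExtension, error) {
	accept := &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}
	reject := &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}

	// An empty vote extension is always possible and must be tolerated.
	if len(req.VoteExtension) == 0 {
		return accept, nil
	}
	// Invented encoding: exactly 8 bytes carrying a big-endian uint64
	// (e.g. an oracle price); anything else is malformed.
	if len(req.VoteExtension) != 8 {
		return reject, nil
	}
	_ = binary.BigEndian.Uint64(req.VoteExtension)
	return accept, nil
}
```

Since returning `REJECT` causes the whole Precommit message to be deemed invalid and discarded, a real application should reject only extensions that are unmistakably malformed.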
+ + ### FinalizeBlock #### Parameters and Types @@ -603,18 +619,18 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): * **Response**: - | Name | Type | Description | Field Number | - |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| - | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing | 1 | - | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | - | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | - | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 4 | - | app_hash | bytes | The Merkle root hash of the application state. | 5 | + | Name | Type | Description | Field Number | Deterministic | + |-------------------------|---------------------------------------------------|----------------------------------------------------------------------------------|--------------|---------------| + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing | 1 | No | + | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | Yes | + | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | Yes | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 4 | Yes | + | app_hash | bytes | The Merkle root hash of the application state. | 5 | Yes | * **Usage**: * Contains the fields of the newly decided block. * This method is equivalent to the call sequence `BeginBlock`, [`DeliverTx`], - and `EndBlock` in the previous version of ABCI. + and `EndBlock` in ABCI 1.0. * The height and time values match the values from the header of the proposed block. * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.misbehavior` to determine rewards and punishments for the validators. @@ -650,6 +666,8 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): making the Application's state evolve in the context of state machine replication. * Currently, CometBFT will fill up all fields in `RequestFinalizeBlock`, even if they were already passed on to the Application via `RequestPrepareProposal` or `RequestProcessProposal`. + * When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees + that at least one non-byzantine validator has run `ProcessProposal` on that block. #### When does CometBFT call `FinalizeBlock`? 
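
As a hedged illustration of the `FinalizeBlock` response fields and the determinism column above: the sketch below derives `tx_results` and `app_hash` purely from the request and the last committed state. Type names again assume the CometBFT v0.38-era Go ABCI interface, and the hash-over-all-transactions state commitment is a toy stand-in for a real application state, not a recommended design.

```go
package app

import (
	"context"
	"crypto/sha256"

	abci "github.com/cometbft/cometbft/abci/types"
)

// FinalizeBlock executes the decided block against the application state.
// (Application is the hypothetical type from the earlier sketch.)
func (app *Application) FinalizeBlock(
	_ context.Context, req *abci.RequestFinalizeBlock,
) (*abci.ResponseFinalizeBlock, error) {
	results := make([]*abci.ExecTxResult, len(req.Txs))
	hasher := sha256.New()
	for i, tx := range req.Txs {
		// Application-specific execution goes here; Code, Data and the gas
		// fields must be deterministic functions of tx and committed state.
		results[i] = &abci.ExecTxResult{Code: 0}
		hasher.Write(tx) // toy state commitment over the executed transactions
	}
	return &abci.ResponseFinalizeBlock{
		TxResults: results,
		AppHash:   hasher.Sum(nil), // must be identical on every correct node
	}, nil
}
```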
@@ -684,14 +702,14 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |---------|-------|---------------------------------------------------------------------|--------------| - | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 | - | power | int64 | Voting power of the validator | 3 | + | Name | Type | Description | Field Number | + |---------|-------|------------------------------------------------------------|--------------| + | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 | + | power | int64 | Voting power of the validator | 3 | * **Usage**: * Validator identified by address - * Used as part of VoteInfo within `CommitInfo` (used in `ProcessProposal` and `FinalizeBlock`), + * Used as part of VoteInfo within `CommitInfo` (used in `ProcessProposal` and `FinalizeBlock`), and `ExtendedCommitInfo` (used in `PrepareProposal`). * Does not include PubKey to avoid sending potentially large quantum pubkeys over the ABCI @@ -700,10 +718,10 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |---------|--------------------------------------------------|-------------------------------|--------------| - | pub_key | [Public Key](../core/data_structures.md#pub_key) | Public key of the validator | 1 | - | power | int64 | Voting power of the validator | 2 | + | Name | Type | Description | Field Number | Deterministic | + |---------|--------------------------------------------------|-------------------------------|--------------|---------------| + | pub_key | [Public Key](../core/data_structures.md#pub_key) | Public key of the validator | 1 | Yes | + | power | int64 | Voting power of the validator | 2 | Yes | * **Usage**: * Validator identified by PubKey @@ -713,13 +731,13 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |--------------------|-------------------------------------------------|------------------------------------------------------------------------------|--------------| - | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. | 1 | - | validator | [Validator](#validator) | The offending validator | 2 | - | height | int64 | Height when the offense occurred | 3 | - | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that was committed at height `height` | 4 | - | total_voting_power | int64 | Total voting power of the validator set at height `height` | 5 | + | Name | Type | Description | Field Number | + |--------------------|-------------------------------------------------|--------------------------------------------------------------|--------------| + | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. 
| 1 | + | validator | [Validator](#validator) | The offending validator | 2 | + | height | int64 | Height when the offense occurred | 3 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that was committed at height `height` | 4 | + | total_voting_power | int64 | Total voting power of the validator set at height `height` | 5 | #### MisbehaviorType @@ -737,45 +755,45 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |-----------|---------------------------------------------------------------|------------------------------------------------------------------------------|--------------| - | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | - | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | - | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | - | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 | + | Name | Type | Description | Field Number | Deterministic | + |-----------|---------------------------------------------------------------|------------------------------------------------------------------------------|--------------|---------------| + | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | Yes | + | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | Yes | + | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | Yes | + | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 | Yes | ### ProofOps * **Fields**: - | Name | Type | Description | Field Number | - |------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against.. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against.. 
| 1 | N/A | ### ProofOp * **Fields**: - | Name | Type | Description | Field Number | - |------|--------|------------------------------------------------|--------------| - | type | string | Type of Merkle proof and how it's encoded. | 1 | - | key | bytes | Key in the Merkle tree that this proof is for. | 2 | - | data | bytes | Encoded Merkle proof for the key. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |------|--------|------------------------------------------------|--------------|---------------| + | type | string | Type of Merkle proof and how it's encoded. | 1 | N/A | + | key | bytes | Key in the Merkle tree that this proof is for. | 2 | N/A | + | data | bytes | Encoded Merkle proof for the key. | 3 | N/A | ### Snapshot * **Fields**: - | Name | Type | Description | Field Number | - |----------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | height | uint64 | The height at which the snapshot was taken (after commit). | 1 | - | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. CometBFT does not interpret this. | 2 | - | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | - | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. CometBFT does not interpret the hash, it only compares them. | 4 | - | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | + | Name | Type | Description | Field Number | Deterministic | + |----------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | height | uint64 | The height at which the snapshot was taken (after commit). | 1 | N/A | + | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. CometBFT does not interpret this. | 2 | N/A | + | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | N/A | + | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. CometBFT does not interpret the hash, it only compares them. | 4 | N/A | + | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | N/A | * **Usage**: - * Used for state sync snapshots, see the [state sync section](../p2p/messages/state-sync.md) for details. + * Used for state sync snapshots, see the [state sync section](../p2p/legacy-docs/messages/state-sync.md) for details. * A snapshot is considered identical across nodes only if _all_ fields are equal (including `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot. * When sent across the network, a snapshot message can be at most 4 MB. 
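
As a hedged illustration of how the `Snapshot` fields above might be populated, the sketch below advertises a single snapshot via `ListSnapshots` (CometBFT v0.38-era Go types assumed; `snapshotHash` and `chunkMetadata` are hypothetical application fields, and the chunk count is invented for the example):

```go
package app

import (
	"context"

	abci "github.com/cometbft/cometbft/abci/types"
)

// ListSnapshots advertises locally available snapshots to state-syncing peers.
// It assumes the hypothetical Application type from the earlier sketches, with
// snapshotHash and chunkMetadata fields maintained while snapshots are taken.
func (app *Application) ListSnapshots(
	_ context.Context, _ *abci.RequestListSnapshots,
) (*abci.ResponseListSnapshots, error) {
	return &abci.ResponseListSnapshots{
		Snapshots: []*abci.Snapshot{{
			Height:   100000,            // taken right after committing this height
			Format:   1,                 // application-defined; bump on breaking changes
			Chunks:   16,                // must be at least 1, even for an empty snapshot
			Hash:     app.snapshotHash,  // equal across nodes only for identical snapshots
			Metadata: app.chunkMetadata, // e.g. serialized per-chunk hashes
		}},
	}, nil
}
```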
@@ -786,10 +804,10 @@ Most of the data structures used in ABCI are shared [common data structures](../

* **Fields**:

-  | Name                        | Type                    | Description                                                     | Field Number |
-  |-----------------------------|-------------------------|------------------------------------------------------------------|--------------|
-  | validator                   | [Validator](#validator) | The validator that sent the vote.                               | 1            |
-  | signed_last_block           | bool                    | Indicates whether or not the validator signed the last block.   | 2            |
+  | Name              | Type                    | Description                                                     | Field Number |
+  |-------------------|-------------------------|------------------------------------------------------------------|--------------|
+  | validator         | [Validator](#validator) | The validator that sent the vote.                               | 1            |
+  | signed_last_block | bool                    | Indicates whether or not the validator signed the last block.   | 2            |

* **Usage**:
  * Indicates whether a validator signed the last block, allowing for rewards based on validator availability.

@@ -819,6 +837,12 @@ Most of the data structures used in ABCI are shared [common data structures](../
  | round | int32                          | Commit round. Reflects the round at which the block proposer decided in the previous height.  | 1            |
  | votes | repeated [VoteInfo](#voteinfo) | List of validators' addresses in the last validator set with their voting information.       | 2            |

+* **Notes**
+  * The `VoteInfo` in `votes` are ordered by the voting power of the validators (descending order, highest to lowest voting power).
+  * CometBFT guarantees this ordering through its validator-set update logic, which ultimately leaves the validators sorted (descending) by their voting power.
+  * The ordering is also persisted when a validator set is saved in the store.
+  * The validator set is loaded from the store when building the `CommitInfo`, ensuring order is maintained from the persisted validator set.
+
### ExtendedCommitInfo

* **Fields**:

@@ -828,20 +852,26 @@ Most of the data structures used in ABCI are shared [common data structures](../
  | round | int32                                          | Commit round. Reflects the round at which the block proposer decided in the previous height.                        | 1            |
  | votes | repeated [ExtendedVoteInfo](#extendedvoteinfo) | List of validators' addresses in the last validator set with their voting information, including vote extensions.   | 2            |

+* **Notes**
+  * The `ExtendedVoteInfo` in `votes` are ordered by the voting power of the validators (descending order, highest to lowest voting power).
+  * CometBFT guarantees this ordering through its validator-set update logic, which ultimately leaves the validators sorted (descending) by their voting power.
+  * The ordering is also persisted when a validator set is saved in the store.
+  * The validator set is loaded from the store when building the `ExtendedCommitInfo`, ensuring order is maintained from the persisted validator set.
+
### ExecTxResult

* **Fields**:

-  | Name       | Type                                                | Description                                                             | Field Number |
-  |------------|-----------------------------------------------------|--------------------------------------------------------------------------|--------------|
-  | code       | uint32                                              | Response code.                                                          | 1            |
-  | data       | bytes                                               | Result bytes, if any.                                                   | 2            |
-  | log        | string                                              | The output of the application's logger. **May be non-deterministic.**   | 3            |
-  | info       | string                                              | Additional information. **May be non-deterministic.**                   | 4            |
-  | gas_wanted | int64                                               | Amount of gas requested for transaction.                                | 5            |
-  | gas_used   | int64                                               | Amount of gas consumed by transaction. 
| 6 | - | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | - | codespace | string | Namespace for the `code`. | 8 | + | Name | Type | Description | Field Number | Deterministic | + |------------|---------------------------------------------------|----------------------------------------------------------------------|--------------|---------------| + | code | uint32 | Response code. | 1 | Yes | + | data | bytes | Result bytes, if any. | 2 | Yes | + | log | string | The output of the application's logger. | 3 | No | + | info | string | Additional information. | 4 | No | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | Yes | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | Yes | + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | No | + | codespace | string | Namespace for the `code`. | 8 | Yes | ### ProposalStatus diff --git a/spec/consensus/readme.md b/spec/consensus/README.md similarity index 92% rename from spec/consensus/readme.md rename to spec/consensus/README.md index 9dbee537e11..edf2ee90d38 100644 --- a/spec/consensus/readme.md +++ b/spec/consensus/README.md @@ -20,7 +20,7 @@ Specification of the consensus protocol implemented in CometBFT. creates a block proposal for consensus - [Light Client Protocol](./light-client) - A protocol for light weight consensus verification and syncing to the latest state -- [Signing](./signing.md) - Rules for cryptographic signatures +- [Validator Signing](./signing.md) - Rules for cryptographic signatures produced by validators. - [Write Ahead Log](./wal.md) - Write ahead log used by the consensus state machine to recover from crashes. diff --git a/spec/consensus/consensus-paper/README.md b/spec/consensus/consensus-paper/README.md index 3c328ddd066..d3d71b763b2 100644 --- a/spec/consensus/consensus-paper/README.md +++ b/spec/consensus/consensus-paper/README.md @@ -1,11 +1,15 @@ +--- +order: 1 +--- + # Consensus Paper The repository contains the specification (and the proofs) of the Tendermint consensus protocol, adopted in CometBFT. -## How to install Latex on Mac OS +## How to install Latex on MacOS -MacTex is Latex distribution for Mac OS. You can download it [here](http://www.tug.org/mactex/mactex-download.html). +MacTex is a Latex distribution for MacOS. You can download it [here](http://www.tug.org/mactex/mactex-download.html). Popular IDE for Latex-based projects is TexStudio. It can be downloaded [here](https://www.texstudio.org/). diff --git a/spec/consensus/consensus.md b/spec/consensus/consensus.md index 438dcf5068d..9fe30bf9e72 100644 --- a/spec/consensus/consensus.md +++ b/spec/consensus/consensus.md @@ -293,7 +293,7 @@ may make JSet verification/gossip logic easier to implement. ### Censorship Attacks Due to the definition of a block -[commit](https://github.com/cometbft/cometbft/blob/main/docs/core/validators.md), any 1/3+ coalition of +[commit](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/validators.md), any 1/3+ coalition of validators can halt the blockchain by not broadcasting their votes.
Such a coalition can also censor particular transactions by rejecting blocks that include these transactions, though this would result in a diff --git a/spec/consensus/creating-proposal.md b/spec/consensus/creating-proposal.md index cb43c8ebb41..feeb8e59666 100644 --- a/spec/consensus/creating-proposal.md +++ b/spec/consensus/creating-proposal.md @@ -4,40 +4,58 @@ order: 2 # Creating a proposal A block consists of a header, transactions, votes (the commit), -and a list of evidence of malfeasance (ie. signing conflicting votes). +and a list of evidence of malfeasance (eg. signing conflicting votes). -We include no more than 1/10th of the maximum block size -(`ConsensusParams.Block.MaxBytes`) of evidence with each block. +Outstanding evidence items get priority over outstanding transactions in the mempool. +All in all, the block MUST NOT exceed `ConsensusParams.Block.MaxBytes`, +or 100MB if `ConsensusParams.Block.MaxBytes == -1`. ## Reaping transactions from the mempool When we reap transactions from the mempool, we calculate maximum data size by subtracting maximum header size (`MaxHeaderBytes`), the maximum -amino overhead for a block (`MaxAminoOverheadForBlock`), the size of +protobuf overhead for a block (`MaxOverheadForBlock`), the size of the last commit (if present) and evidence (if present). While reaping -we account for amino overhead for each transaction. +we account for protobuf overhead for each transaction. ```go -func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 { - return maxBytes - +func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 { + return maxBytes - MaxOverheadForBlock - MaxHeaderBytes - - int64(valsCount)*MaxVoteBytes - - int64(evidenceCount)*MaxEvidenceBytes + MaxCommitBytes(valsCount) - + evidenceBytes } ``` +If `ConsensusParams.Block.MaxBytes == -1`, we reap *all* outstanding transactions from the mempool. + +## Preparing the proposal + +Once the transactions have been reaped from the mempool according to the rules described above, +CometBFT calls `PrepareProposal` on the application with the transaction list that has just been reaped. +As part of this call, the application can remove, add, or reorder transactions in the transaction list. + +The `RequestPrepareProposal` contains two important fields: + +* `MaxTxBytes`, which contains the value returned by `MaxDataBytes` described above. + The application MUST NOT return a list of transactions whose size exceeds this number. +* `Txs`, which contains the list of reaped transactions. + +For more details on `PrepareProposal`, please see the +[relevant part of the spec](../abci/abci%2B%2B_methods.md#prepareproposal). + ## Validating transactions in the mempool -Before we accept a transaction in the mempool, we check if it's size is no more +Before we accept a transaction in the mempool, we check if its size is no more than {MaxDataSize}. {MaxDataSize} is calculated using the same formula as -above, except we subtract the max number of evidence, {MaxNum} by the maximum size of evidence +above, except we assume there is no evidence.
```go -func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { - return maxBytes - - MaxOverheadForBlock - - MaxHeaderBytes - - (maxNumEvidence * MaxEvidenceBytes) +func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { + return maxBytes - + MaxOverheadForBlock - + MaxHeaderBytes - + MaxCommitBytes(valsCount) } ``` diff --git a/spec/consensus/evidence.md b/spec/consensus/evidence.md index b3f3de5c6a3..ad341f285f9 100644 --- a/spec/consensus/evidence.md +++ b/spec/consensus/evidence.md @@ -1,4 +1,5 @@ --- +order: 4 --- # Evidence diff --git a/spec/consensus/light-client/accountability.md b/spec/consensus/light-client/accountability.md index 569dadea9e3..3907e8d4723 100644 --- a/spec/consensus/light-client/accountability.md +++ b/spec/consensus/light-client/accountability.md @@ -1,3 +1,3 @@ # Fork accountability -Deprecated, please see [light-client/accountability](https://github.com/cometbft/cometbft/blob/main/spec/light-client/accountability). +Deprecated, please see [light-client/accountability](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/accountability). diff --git a/spec/consensus/light-client/detection.md b/spec/consensus/light-client/detection.md index f4f4ad3df80..9e70726c76e 100644 --- a/spec/consensus/light-client/detection.md +++ b/spec/consensus/light-client/detection.md @@ -1,3 +1,3 @@ # Detection -Deprecated, please see [light-client/detection](https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection). +Deprecated, please see [light-client/detection](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection). diff --git a/spec/consensus/light-client/verification.md b/spec/consensus/light-client/verification.md index f482ef21e3c..d0e2bf1e5c8 100644 --- a/spec/consensus/light-client/verification.md +++ b/spec/consensus/light-client/verification.md @@ -1,3 +1,3 @@ # Core Verification -Deprecated, please see [light-client/verification](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification). +Deprecated, please see [light-client/verification](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification). 
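To make the reaping arithmetic in `creating-proposal.md` above concrete, here is a self-contained Go sketch of `MaxDataBytes` and `MaxDataBytesNoEvidence`. The byte constants are placeholder assumptions for illustration, not the values defined in the CometBFT types package.

```go
package main

import "fmt"

// Placeholder sizes (assumptions; the real constants live in the
// CometBFT types package).
const (
	maxOverheadForBlock int64 = 11  // assumed protobuf overhead per block
	maxHeaderBytes      int64 = 626 // assumed maximum header size
	maxCommitOverhead   int64 = 94  // assumed fixed commit overhead
	maxCommitSigBytes   int64 = 109 // assumed size per commit signature
)

// maxCommitBytes approximates MaxCommitBytes: fixed overhead plus one
// signature per validator in the last commit.
func maxCommitBytes(valsCount int) int64 {
	return maxCommitOverhead + int64(valsCount)*maxCommitSigBytes
}

// maxDataBytes mirrors the MaxDataBytes formula above: the block budget
// minus block overhead, header, last commit, and included evidence.
func maxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 {
	return maxBytes -
		maxOverheadForBlock -
		maxHeaderBytes -
		maxCommitBytes(valsCount) -
		evidenceBytes
}

// maxDataBytesNoEvidence is the mempool-side variant that assumes no
// evidence is present.
func maxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 {
	return maxDataBytes(maxBytes, 0, valsCount)
}

func main() {
	// With a 22 MB block, 100 validators, and 1 KB of evidence included:
	fmt.Println(maxDataBytes(22*1024*1024, 1024, 100))     // bytes left for transactions
	fmt.Println(maxDataBytesNoEvidence(22*1024*1024, 100)) // mempool admission bound
}
```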
diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md index b42b3ab2f1f..ee8ca693d93 100644 --- a/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md @@ -77,10 +77,10 @@ function StartRound(round) { ```go upon timely(⟨PROPOSAL, h_p, round_p, (v,t), −1⟩) from proposer(h_p, round_p) while step_p = propose do { if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v,t)⟩ + broadcast ⟨PREVOTE, h_p, round_p, id(v,t)⟩ } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ } step_p ← prevote } @@ -96,7 +96,7 @@ This gives the following rule: #### **[PBTS-ALG-OLD-PREVOTE.0]** ```go -upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, vr, id((v, tvote)⟩ +upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, vr, id((v, tvote)⟩ while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { broadcast ⟨PREVOTE, h_p, roundp, id(v, tprop)⟩ @@ -120,10 +120,10 @@ upon timely(⟨PROPOSAL, h_p, round_p, (v,t), ∗⟩) from proposer(h_p, round_p if step_p = prevote { lockedValue_p ← v lockedRound_p ← round_p - broadcast ⟨PRECOMMIT, h_p, round_p, id(v,t))⟩ + broadcast ⟨PRECOMMIT, h_p, round_p, id(v,t))⟩ step_p ← precommit } - validValue_p ← v + validValue_p ← v validRound_p ← round_p } ``` @@ -142,7 +142,7 @@ upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) AND 2f + 1 ⟨PREC if valid(v) { decision_p [h_p] = (v,t) // decide on time too h_p ← h_p + 1 - reset lockedRound_p , lockedValue_p, validRound_p and validValue_p to initial values and empty message log + reset lockedRound_p , lockedValue_p, validRound_p and validValue_p to initial values and empty message log StartRound(0) } } diff --git a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md index 8fee14252ee..06f9e8ea58e 100644 --- a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md @@ -12,7 +12,7 @@ Every correct validator `V` maintains a synchronized clock `C_V` that ensures: #### **[PBTS-CLOCK-PRECISION.0]** -There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, +There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, `|C_V(t) - C_W(t)| < PRECISION` @@ -53,7 +53,7 @@ A proposer proposes a pair `(v,t)` of consensus value `v` and time `t`. [Time-Validity] If a correct validator decides on `t` then `t` is "OK" (we will formalize this below), even if up to `2f` validators are faulty. -However, the properties of Tendermint consensus algorithm are of more interest with respect to the blocks, that is, what is written into a block and when. We therefore, in the following, will give the safety and liveness properties from this block-centric viewpoint. +However, the properties of Tendermint consensus algorithm are of more interest with respect to the blocks, that is, what is written into a block and when. We therefore, in the following, will give the safety and liveness properties from this block-centric viewpoint. 
For this, observe that the time `t` decided at consensus height `k` will be written in the block of height `k+1`, and will be supported by `2f + 1` `PRECOMMIT` messages of the same consensus round `r`. The time written in the block, we will denote by `b.time` (to distinguish it from the term `bfttime` used for median-based time). For this, it is important to have the following consensus algorithm property: #### **[PBTS-INV-TIME-AGR.0]** @@ -188,4 +188,4 @@ Back to [main document][main]. [arXiv]: https://arxiv.org/abs/1807.04938 -[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 +[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 diff --git a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md index bcb01d73640..f71d7ab808e 100644 --- a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md @@ -58,7 +58,7 @@ We assume that the field `proposal` in the `PROPOSE` message is a pair `(v, time In the reception step at node `p` at local time `now_p`, upon receiving a message `m`: -- **if** the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely`. +- **if** the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely`. (`PRECISION` and `MSGDELAY` being system parameters, see [below](#safety-and-liveness)) > after the presentation in the dev session, we realized that different semantics for the reception step is closer aligned to the implementation. Instead of dropping propose messages, we keep all of them, and mark timely ones. 
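The reception-step rule above reduces to a simple interval check on the proposal's timestamp. Below is a minimal Go sketch of the `timely` predicate, with placeholder values for the `PRECISION` and `MSGDELAY` system parameters (assumptions, not the parameters of any real network).

```go
package main

import (
	"fmt"
	"time"
)

// Placeholder values for the PBTS system parameters.
const (
	precision = 500 * time.Millisecond // assumed bound on clock drift between correct validators
	msgDelay  = 2 * time.Second        // assumed bound on PROPOSE message delay
)

// isTimely implements the reception-step check described above:
// now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY.
func isTimely(msgTime, now time.Time) bool {
	return msgTime.After(now.Add(-precision)) &&
		msgTime.Before(now.Add(precision+msgDelay))
}

func main() {
	now := time.Now()
	fmt.Println(isTimely(now, now))                 // true
	fmt.Println(isTimely(now.Add(-time.Hour), now)) // false: too far in the past
	fmt.Println(isTimely(now.Add(time.Hour), now))  // false: too far in the future
}
```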
@@ -82,7 +82,7 @@ function StartRound(round) { step_p ← propose if proposer(h_p, round_p) = p { - + if validValue_p != nil { proposal ← validValue_p @@ -92,7 +92,7 @@ function StartRound(round) { } broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ } else { - schedule OnTimeoutPropose(h_p,round_p) to + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) } } @@ -111,14 +111,14 @@ function StartRound(round) { wait until now_p > block time of block h_p - 1 if validValue_p != nil { // add "now_p" - proposal ← (validValue_p, now_p) + proposal ← (validValue_p, now_p) } else { // add "now_p" - proposal ← (getValue(), now_p) + proposal ← (getValue(), now_p) } broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ } else { - schedule OnTimeoutPropose(h_p,round_p) to + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) } } @@ -140,12 +140,12 @@ function StartRound(round) { ```go -upon timely(⟨PROPOSAL, h_p, round_p, v, vr⟩) +upon timely(⟨PROPOSAL, h_p, round_p, v, vr⟩) from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ } else { broadcast ⟨PREVOTE, hp, round_p, nil⟩ @@ -158,9 +158,9 @@ while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { ```go -upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v, tvote)⟩ +upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v, tvote)⟩ while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { // send hash of v and tprop in PREVOTE message @@ -187,15 +187,15 @@ upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) ```go -upon ⟨PROPOSAL, h_p, r, v, ∗⟩ from proposer(h_p, r) - AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v)⟩ +upon ⟨PROPOSAL, h_p, r, v, ∗⟩ from proposer(h_p, r) + AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v)⟩ while decisionp[h_p] = nil do { if valid(v) { decision_p [h_p] = v h_p ← h_p + 1 - reset lockedRound_p , lockedValue_p, validRound_p and - validValue_p to initial values and empty message log + reset lockedRound_p , lockedValue_p, validRound_p and + validValue_p to initial values and empty message log StartRound(0) } } @@ -206,15 +206,15 @@ upon ⟨PROPOSAL, h_p, r, v, ∗⟩ from proposer(h_p, r) ```go -upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) +upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v,t)⟩ while decisionp[h_p] = nil do { if valid(v) { // decide on time too - decision_p [h_p] = (v,t) + decision_p [h_p] = (v,t) h_p ← h_p + 1 - reset lockedRound_p , lockedValue_p, validRound_p and - validValue_p to initial values and empty message log + reset lockedRound_p , lockedValue_p, validRound_p and + validValue_p to initial values and empty message log StartRound(0) } } diff --git a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla index 0bf3d28142a..d8524540faa 100644 --- a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla +++ b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla @@ -3,7 +3,7 @@ A TLA+ specification of a simplified Tendermint consensus algorithm, with added clocks and proposer-based timestamps. 
This TLA+ specification extends and modifies the Tendermint TLA+ specification for fork accountability: - https://github.com/cometbft/cometbft/blob/main/spec/light-client/accountability/TendermintAcc_004_draft.tla + https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/accountability/TendermintAcc_004_draft.tla * Version 1. A preliminary specification. diff --git a/spec/consensus/proposer-selection.md b/spec/consensus/proposer-selection.md index f9f0ff4ace1..e5142bd3a97 100644 --- a/spec/consensus/proposer-selection.md +++ b/spec/consensus/proposer-selection.md @@ -179,7 +179,7 @@ In order to prevent this, when a new validator is added, its initial priority is where P is the total voting power of the set including V. -Curent implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. +Current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. If we consider the validator set where p3 has just been added: diff --git a/spec/consensus/signing.md b/spec/consensus/signing.md index 38afe35022b..68547eea25e 100644 --- a/spec/consensus/signing.md +++ b/spec/consensus/signing.md @@ -1,4 +1,5 @@ --- +order: 5 --- # Validator Signing diff --git a/spec/consensus/wal.md b/spec/consensus/wal.md index 9bff87f0888..599d63d3553 100644 --- a/spec/consensus/wal.md +++ b/spec/consensus/wal.md @@ -1,3 +1,6 @@ +--- +order: 6 +--- # WAL Consensus module writes every message to the WAL (write-ahead log). @@ -28,5 +31,5 @@ WAL. Then it will go to precommit, and that time it will work because the private validator contains the `LastSignBytes` and then we’ll replay the precommit from the WAL. -Make sure to read about [WAL corruption](https://github.com/cometbft/cometbft/blob/main/docs/core/running-in-production.md#wal-corruption) +Make sure to read about [WAL corruption](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/running-in-production.md#wal-corruption) and recovery strategies. diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index a5be15626b4..ecd449adc67 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -51,7 +51,7 @@ and a list of evidence of malfeasance (ie. signing conflicting votes). | Name | Type | Description | Validation | |--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) | -| Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to CometBFT. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#checktx). +| Data | [Data](#data) | Data contains a list of transactions. 
The contents of the transactions are unknown to CometBFT. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci%2B%2B_methods.md#checktx). | Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidencelist) apply | | LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). | @@ -121,9 +121,9 @@ the data in the current block, the previous block, and the results returned by t | Name | Type | Description | Validation | |-------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Version | [Version](#version) | Version defines the application and protocol version being used. | Must adhere to the validation rules of [Version](#version) | +| Version | [Version](#version) | Version defines the application and block versions being used. | Must adhere to the validation rules of [Version](#version) | | ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. | -| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 | +| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 | | Time | [Time](#time) | The timestamp is equal to the weighted median of validators present in the last commit. Read more on time in the [BFT-time section](../consensus/bft-time.md). Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. | Time must be >= previous header timestamp + consensus parameters TimeIotaMs. The timestamp of the first block must be equal to the genesis time (since there's no votes to compute the median). | | LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. | | LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash.
| Must be of length 32 | @@ -131,9 +131,9 @@ the data in the current block, the previous block, and the results returned by t | ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | | NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | | ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 | -| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and commiting the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT can not perform validation on it. | +| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT cannot perform validation on it. | | LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. | -| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 | +| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 | | ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 | ## Version @@ -142,10 +142,10 @@ NOTE: that this is more specifically the consensus version and doesn't include i P2P Version. (TODO: we should write a comprehensive document about versioning that this can refer to) -| Name | type | Description | Validation | -|-------|--------|-----------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------| -| Block | uint64 | This number represents the version of the block protocol and must be the same throughout an operational network | Must be equal to protocol version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) | -| App | uint64 | App version is decided on by the application.
Read [here](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_app_requirements.md) | `block.Version.App == state.Version.Consensus.App` | +| Name | type | Description | Validation | +|-------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------| +| Block | uint64 | This number represents the block version and must be the same throughout an operational network | Must be equal to block version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) | +| App | uint64 | App version is decided on by the application. Read [here](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci++_app_requirements.md) | `block.Version.App == state.Version.Consensus.App` | ## BlockID @@ -156,7 +156,7 @@ The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` inc | Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. | hash must be of length 32 | | PartSetHeader | [PartSetHeader](#partsetheader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#partsetheader) | -See [MerkleRoot](./encoding.md#MerkleRoot) for details. +See [MerkleRoot](./encoding.md#merkleroot) for details. ## PartSetHeader @@ -225,7 +225,7 @@ to reconstruct the vote set given the validator set. | Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | The length of the signature must be > 0 and < than 64 | NOTE: `ValidatorAddress` and `Timestamp` fields may be removed in the future -(see [ADR-25](https://github.com/cometbft/cometbft/blob/main/docs/architecture/adr-025-commit.md)). +(see [ADR-25](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/architecture/tendermint-core/adr-025-commit.md)). ## ExtendedCommitSig @@ -394,7 +394,7 @@ in the same round of the same height. Votes are lexicographically sorted on `Blo `LightClientAttackEvidence` is a generalized evidence that captures all forms of known attacks on a light client such that a full node can verify, propose and commit the evidence on-chain for punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation -and Amnesia. These attacks are exhaustive. You can find a more detailed overview of this [here](../light-client/accountability#the_misbehavior_of_faulty_validators) +and Amnesia. These attacks are exhaustive. 
You can find a more detailed overview of this [here](../light-client/accountability#the-misbehavior-of-faulty-validators) | Name | Type | Description | Validation | |----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| @@ -476,7 +476,7 @@ func SumTruncated(bz []byte) []byte { | Name | Type | Description | Field Number | |--------------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 | -| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 | +| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://vitalik.ca/general/2017/12/31/pos_faq.html#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 | | max_bytes | int64 | maximum size in bytes of total evidence allowed to be entered into a block | 3 | ### ValidatorParams diff --git a/spec/core/state.md b/spec/core/state.md index 1dbb020001c..c19d0096ca1 100644 --- a/spec/core/state.md +++ b/spec/core/state.md @@ -104,6 +104,12 @@ The total size of a block is limited in bytes by the `ConsensusParams.Block.MaxB Proposed blocks must be less than this size, and will be considered invalid otherwise. +The Application may set `ConsensusParams.Block.MaxBytes` to -1. +In that case, the actual block limit is set to 100 MB, +and CometBFT will provide all transactions in the mempool as part of `PrepareProposal`. +The application has to be careful to return a list of transactions in `ResponsePrepareProposal` +whose size is less than or equal to `RequestPrepareProposal.MaxTxBytes`. + Blocks should additionally be limited by the amount of "gas" consumed by the transactions in the block, though this is not yet implemented. diff --git a/spec/light-client/README.md b/spec/light-client/README.md index 82f8f3045d5..46dbe1792f6 100644 --- a/spec/light-client/README.md +++ b/spec/light-client/README.md @@ -10,7 +10,7 @@ parent: This directory contains work-in-progress English and TLA+ specifications for the Light Client protocol. Implementations of the light client can be found in [Rust](https://github.com/informalsystems/tendermint-rs/tree/master/light-client) and -[Go](https://github.com/cometbft/cometbft/tree/main/light). +[Go](https://github.com/cometbft/cometbft/tree/v0.38.x/light). Light clients are assumed to be initialized once from a trusted source with a trusted header and validator set. 
The light client @@ -31,8 +31,8 @@ In case a lightclient attack is detected, the lightclient submits evidence to a The [English specification](verification/verification_001_published.md) describes the light client commit verification problem in terms of the temporal properties -[LCV-DIST-SAFE.1](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) and -[LCV-DIST-LIVE.1](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-live1). +[LCV-DIST-SAFE.1](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) and +[LCV-DIST-LIVE.1](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-live1). Commit verification is assumed to operate within the Cosmos Failure Model, where +2/3 of validators are correct for some time period and validator sets can change arbitrarily at each height. diff --git a/spec/light-client/accountability/README.md b/spec/light-client/accountability/README.md index 64b475bec71..07179dd6bbf 100644 --- a/spec/light-client/accountability/README.md +++ b/spec/light-client/accountability/README.md @@ -103,7 +103,7 @@ F3 is similar to F1, except that no two correct validators decide on different b In addition, without creating a fork on the main chain, light clients can be contaminated by more than a third of validators that are faulty and sign a forged header F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB that do not necessarily know the complete prefix of headers (Fork-Light), as they trust a header that is signed by at least one correct validator (trusting period method). -The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a light client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the light client. +The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a light client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the light client. F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled). @@ -207,10 +207,6 @@ Execution: *Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1) as they only vote once per round in the execution. -Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: - -. - If a light client is attacked using this attack with 1/3 or more of voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: In the execution above, correct validators still find the value acceptable, however, the block the light client trusts deviates from the one on the main chain. 
#### Scenario 4: More than 2/3 of faults @@ -231,10 +227,9 @@ Execution Consequences: -* The validators in F1 will be detectable by the the fork accountability mechanisms. +* The validators in F1 will be detectable by the fork accountability mechanisms. * The validators in F2 cannot be detected using this mechanism. -Only in case they signed something which conflicts with the application this can be used against them. Otherwise they do not do anything incorrect. -* This case is not covered by the report as it only assumes at most 2/3 of faulty validators. +Only if they signed something which conflicts with the application can this be used against them. Otherwise, they do not do anything incorrect. **Q:** do we need to define a special kind of attack for the case where a validator sign arbitrarily state? It seems that detecting such attack requires a different mechanism that would require as an evidence a sequence of blocks that led to that state. This might be very tricky to implement. @@ -291,7 +286,7 @@ Execution: Consequences: * To detect this, a node needs to see both, the forged header and the canonical header from the chain. -* If this is the case, detecting these kind of attacks is easy as it just requires verifying if processes are signing messages in heights in which they are not part of the validator set. +* If this is the case, detecting these kinds of attacks is easy, as it just requires verifying whether processes are signing messages at heights in which they are not part of the validator set. **Remark.** We can have phantom-validator-based attacks as a follow up of equivocation or amnesia based attack where forked state contains validators that are not part of the validator set at the main chain. In this case, they keep signing messages contributed to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack full node during a period of time it is eclipsed. @@ -305,6 +300,6 @@ punishing the 1/3+ lunatic cabal, that is the root cause of the attack. Lunatic validator agrees to sign commit messages for arbitrary application state. It is used to attack light clients. Note that detecting this behavior require application knowledge. Detecting this behavior can probably be done by -referring to the block before the one in which height happen. +referring to the block before the one in which the misbehavior happened. **Q:** can we say that in this case a validator declines to check if a proposed value is valid before voting for it?
diff --git a/spec/light-client/attacks/Isolation_001_draft.tla b/spec/light-client/attacks/Isolation_001_draft.tla index 940b5ac1187..6e1b0b8a5bb 100644 --- a/spec/light-client/attacks/Isolation_001_draft.tla +++ b/spec/light-client/attacks/Isolation_001_draft.tla @@ -7,7 +7,7 @@ * * It follows the English specification: * - * https://github.com/cometbft/cometbft/blob/main/spec/light-client/attacks/isolate-attackers_001_draft.md + * https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/attacks/isolate-attackers_001_draft.md * * The assumptions made in this specification: * diff --git a/spec/light-client/attacks/isolate-attackers_001_draft.md b/spec/light-client/attacks/isolate-attackers_001_draft.md index a130dfb6fbf..e66920a3c5e 100644 --- a/spec/light-client/attacks/isolate-attackers_001_draft.md +++ b/spec/light-client/attacks/isolate-attackers_001_draft.md @@ -28,7 +28,7 @@ This specification considers how a full node in a Cosmos blockchain can isolate # Part I - Basics -For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1), cf. [Light Client Verification][verification]. +For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1), cf. [Light Client Verification][verification]. # Part II - Definition of the Problem @@ -191,7 +191,7 @@ The main function `isolateMisbehavingProcesses` distinguishes three kinds of wro The question is whether this captures all attacks. First observe that the first checking in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. If this check passes, that is, if `violatesTMValidity` returns `FALSE` this means that [FN-NONVALID-OUTPUT] evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence after `violatesTMValidity`, all the involved validators are the ones from the blockchain. It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also it is sufficient to consider two different valid consensus values, that is, binary consensus. -**TODO** we have analyzed Tendermint consensus algorithm with TLA+ and have accompanied Galois in an independent study of the protocol based on [Ivy proofs](https://github.com/cometbft/cometbft/tree/main/spec/ivy-proofs). +**TODO** we have analyzed Tendermint consensus algorithm with TLA+ and have accompanied Galois in an independent study of the protocol based on [Ivy proofs](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/ivy-proofs). # References @@ -202,22 +202,22 @@ First observe that the first checking in `isolateMisbehavingProcesses` is `viola [[detection]] The specification of the light client attack detection mechanism. 
[supervisor]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/supervisor/supervisor_001_draft.md +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/supervisor/supervisor_001_draft.md -[verification]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md +[verification]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md [detection]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md [LC-DATA-EVIDENCE-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#lc-data-evidence1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#lc-data-evidence1 [CMBC-LC-EVIDENCE-DATA-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-evidence-data1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-evidence-data1 [node-based-attack-characterization]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#block-based-characterization-of-attacks +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#block-based-characterization-of-attacks -[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 +[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 -[LCV-FUNC-VALID.link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-func-valid2 +[LCV-FUNC-VALID.link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-func-valid2 diff --git a/spec/light-client/attacks/isolate-attackers_002_reviewed.md b/spec/light-client/attacks/isolate-attackers_002_reviewed.md index ae5f99afaad..e83a4ca32a4 100644 --- a/spec/light-client/attacks/isolate-attackers_002_reviewed.md +++ b/spec/light-client/attacks/isolate-attackers_002_reviewed.md @@ -26,7 +26,7 @@ After providing the [problem statement](#Part-I---Basics-and-Definition-of-the-P # Part I - Basics and Definition of the Problem -For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1), we refer to the specification of [Light Client Verification][verification]. +For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1), we refer to the specification of [Light Client Verification][verification]. The specification of the [detection mechanism][detection] describes @@ -189,7 +189,7 @@ The main function `isolateMisbehavingProcesses` distinguishes three kinds of wro The question is whether this captures all attacks. First observe that the first check in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. 
If this check passes, that is, if `violatesTMValidity` returns `FALSE` this means that [[LCAI-NONVALID-OUTPUT.1]](#LCAI-FUNC-NONVALID1]) evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence, after `violatesTMValidity`, all the involved validators are the ones from the blockchain. It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also, as we have two different blocks for the same height, it is sufficient to consider two different valid consensus values, that is, binary consensus. -For this fixed group membership, we have analyzed the attacks using the TLA+ specification of [Tendermint Consensus in TLA+][tendermint-accountability]. We checked that indeed the only possible scenarios that can lead to violation of agreement are **equivocation** and **amnesia**. An independent study by Galois of the protocol based on [Ivy proofs](https://github.com/cometbft/cometbft/tree/main/spec/ivy-proofs) led to the same conclusion. +For this fixed group membership, we have analyzed the attacks using the TLA+ specification of [Tendermint Consensus in TLA+][tendermint-accountability]. We checked that indeed the only possible scenarios that can lead to violation of agreement are **equivocation** and **amnesia**. An independent study by Galois of the protocol based on [Ivy proofs](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/ivy-proofs) led to the same conclusion. # References @@ -201,25 +201,25 @@ For this fixed group membership, we have analyzed the attacks using the TLA+ spe [tendermint-accountability]: -https://github.com/cometbft/cometbft/tree/main/spec/light-client/accountability +https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/accountability [supervisor]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/supervisor/supervisor_001_draft.md +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/supervisor/supervisor_001_draft.md -[verification]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md +[verification]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md [detection]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md [LC-DATA-EVIDENCE-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#lc-data-evidence1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#lc-data-evidence1 [CMBC-LC-EVIDENCE-DATA-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-evidence-data1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-evidence-data1 [node-based-attack-characterization]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#block-based-characterization-of-attacks +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#block-based-characterization-of-attacks -[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 +[CMBC-FM-2THIRDS-link]: 
https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 -[LCV-FUNC-VALID.link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-func-valid2 +[LCV-FUNC-VALID.link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-func-valid2 diff --git a/spec/light-client/attacks/notes-on-evidence-handling.md b/spec/light-client/attacks/notes-on-evidence-handling.md index 242df12c23d..3d61efc7646 100644 --- a/spec/light-client/attacks/notes-on-evidence-handling.md +++ b/spec/light-client/attacks/notes-on-evidence-handling.md @@ -19,7 +19,7 @@ detects an attack, it needs to send to a witness only missing data (common heigh and conflicting light block) as it has its trace. Keeping light client attack data of constant size saves bandwidth and reduces an attack surface. As we will explain below, although in the context of light client core -[verification](https://github.com/cometbft/cometbft/tree/main/spec/light-client/verification) +[verification](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/verification) the roles of primary and witness are clearly defined, in case of the attack, we run the same attack detection procedure twice where the roles are swapped. The rationale is that the light client does not know what peer is correct (on a right main branch) @@ -68,7 +68,7 @@ The following invariant holds for the primary trace: ### Witness with a conflicting header The verified header at height `h` is cross-checked with every witness as part of -[detection](https://github.com/cometbft/cometbft/tree/main/spec/light-client/detection). +[detection](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/detection). If a witness returns the conflicting header at the height `h` the following procedure is executed to verify if the conflicting header comes from the valid trace and if that's the case to create an attack evidence: diff --git a/spec/light-client/detection/LCDetector_003_draft.tla b/spec/light-client/detection/LCDetector_003_draft.tla index cdc492b3661..ecc52ee32f4 100644 --- a/spec/light-client/detection/LCDetector_003_draft.tla +++ b/spec/light-client/detection/LCDetector_003_draft.tla @@ -3,7 +3,7 @@ * This is a specification of the light client detector module. * It follows the English specification: * - * https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md + * https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md * * The assumptions made in this specification: * diff --git a/spec/light-client/detection/detection_001_reviewed.md b/spec/light-client/detection/detection_001_reviewed.md index faa89d53aa8..8505fc5f4d2 100644 --- a/spec/light-client/detection/detection_001_reviewed.md +++ b/spec/light-client/detection/detection_001_reviewed.md @@ -767,19 +767,19 @@ Once a bogus block is recognized as such the secondary is removed. [[supervisor]] The specification of the light client supervisor. 
-[verification]: https://github.com/cometbft/cometbft/tree/main/spec/light-client/verification +[verification]: https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/verification -[supervisor]: https://github.com/cometbft/cometbft/tree/main/spec/light-client/supervisor +[supervisor]: https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/supervisor [CMBC-VAL-CONTAINS-CORR-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-val-contains-corr1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-val-contains-corr1 [fetch]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-func-fetch1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-func-fetch1 [LCV-INV-TP1-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-inv-tp1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-inv-tp1 diff --git a/spec/light-client/detection/detection_003_reviewed.md b/spec/light-client/detection/detection_003_reviewed.md index d5c73a0556a..2024598bf21 100644 --- a/spec/light-client/detection/detection_003_reviewed.md +++ b/spec/light-client/detection/detection_003_reviewed.md @@ -803,37 +803,37 @@ Once a bogus block is recognized as such the secondary is removed. [[supervisor]] The specification of the light client supervisor. -[verification]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md +[verification]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md -[supervisor]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/supervisor/supervisor_001_draft.md +[supervisor]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/supervisor/supervisor_001_draft.md -[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 +[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 -[CMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-sound-distr-poss-commit1 +[CMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-sound-distr-poss-commit1 -[LCV-SEQ-SAFE-link]:https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-seq-safe1 +[LCV-SEQ-SAFE-link]:https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-seq-safe1 [CMBC-VAL-CONTAINS-CORR-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-val-contains-corr1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-val-contains-corr1 [fetch]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-func-fetch1 
+https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-func-fetch1 [LCV-INV-TP1-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-inv-tp1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-inv-tp1 [LCV-LB-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-data-lightblock1 [LCV-LS-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-data-lightstore2 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-data-lightstore2 [LVC-HD-link]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-header-fields2 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#cmbc-header-fields2 [repl]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/supervisor/supervisor_001_draft.md#lc-func-replace-secondary1 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/supervisor/supervisor_001_draft.md#lc-func-replace-secondary1 [vtt]: -https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#lcv-func-main2 +https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_002_draft.md#lcv-func-main2 diff --git a/spec/light-client/supervisor/supervisor_001_draft.md b/spec/light-client/supervisor/supervisor_001_draft.md index 28ea997c750..66758a85eaa 100644 --- a/spec/light-client/supervisor/supervisor_001_draft.md +++ b/spec/light-client/supervisor/supervisor_001_draft.md @@ -307,7 +307,7 @@ type LCInitData struct { where only one of the components must be provided. `GenesisDoc` is defined in the [CometBFT -Types](https://github.com/cometbft/cometbft/blob/main/types/genesis.go). +Types](https://github.com/cometbft/cometbft/blob/v0.38.x/types/genesis.go). #### **[LC-DATA-GENESIS.1]** diff --git a/spec/light-client/supervisor/supervisor_002_draft.md b/spec/light-client/supervisor/supervisor_002_draft.md index 230872116a4..a9b6890d3c6 100644 --- a/spec/light-client/supervisor/supervisor_002_draft.md +++ b/spec/light-client/supervisor/supervisor_002_draft.md @@ -17,7 +17,7 @@ type LCInitData struct { where only one of the components must be provided. `GenesisDoc` is defined in the [CometBFT -Types](https://github.com/cometbft/cometbft/blob/main/types/genesis.go). +Types](https://github.com/cometbft/cometbft/blob/v0.38.x/types/genesis.go). ### Initialization @@ -45,8 +45,8 @@ able to verify anything. 
Cross-checking this trusted block with providers upon initialization is helpful for ensuring that the node is responsive and correctly configured but does not increase trust since proving a conflicting block is a -[light client attack](https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-attack1) -and not just a [bogus](https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_003_reviewed.md#cmbc-bogus1) block could result in +[light client attack](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#cmbc-lc-attack1) +and not just a [bogus](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_003_reviewed.md#cmbc-bogus1) block could result in performing backwards verification beyond the trusted period, thus a fruitless endeavour. diff --git a/spec/light-client/verification/verification_001_published.md b/spec/light-client/verification/verification_001_published.md index 45123d1a29a..5d60fb04276 100644 --- a/spec/light-client/verification/verification_001_published.md +++ b/spec/light-client/verification/verification_001_published.md @@ -406,9 +406,9 @@ Each instance must eventually terminate. > These definitions imply that if the primary is faulty, a header may or > may not be added to *LightStore*. In any case, -> [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) must hold. -> The invariant [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) and the liveness -> requirement [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-life1) +> [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) must hold. +> The invariant [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) and the liveness +> requirement [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-life1) > allow that verified headers are added to *LightStore* whose > height was not passed > to the verifier (e.g., intermediate headers used in bisection; see below). @@ -425,16 +425,16 @@ Each instance must eventually terminate. This specification provides a partial solution to the sequential specification. The *Verifier* solves the invariant of the sequential part -[**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) => [**[LCV-SEQ-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-seq-inv) +[**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) => [**[LCV-SEQ-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-seq-inv) In the case the primary is correct, and there is a recent header in *LightStore*, the verifier satisfies the liveness requirements. 
⋀ *primary is correct* ⋀ always ∃ verified header in LightStore. *header.Time* > *now* - *trustingPeriod* -⋀ [**[LCV-A-Comm.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-a-comm) ⋀ ( +⋀ [**[LCV-A-Comm.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-a-comm) ⋀ ( ( [**[CMBC-CorrFull.1]**][CMBC-CorrFull-link] ⋀ - [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-live1) ) - ⟹ [**[LCV-SEQ-LIVE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-seq-live1) + [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-live1) ) + ⟹ [**[LCV-SEQ-LIVE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-seq-live1) ) # Part IV - Light Client Verification Protocol @@ -767,7 +767,7 @@ func VerifyToTarget(primary PeerID, lightStore LightStore, - Error conditions - if the precondition is violated - if `ValidAndVerified` or `FetchLightBlock` report an error - - if [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) is violated + - if [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) is violated ### Details of the Functions @@ -854,7 +854,7 @@ func Schedule(lightStore, nextHeight, targetHeight) Height *trustedStore* is implemented by the light blocks in lightStore that have the state *StateVerified*. -#### Argument for [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe) +#### Argument for [**[LCV-DIST-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe) - `ValidAndVerified` implements the soundness checks and the checks [**[CMBC-VAL-CONTAINS-CORR.1]**][CMBC-VAL-CONTAINS-CORR-link] and @@ -863,7 +863,7 @@ have the state *StateVerified*. - Only if `ValidAndVerified` returns with `SUCCESS`, the state of a light block is set to *StateVerified*. -#### Argument for [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-life) +#### Argument for [**[LCV-DIST-LIVE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-life) - If *primary* is correct, - `FetchLightBlock` will always return a light block consistent @@ -871,7 +871,7 @@ have the state *StateVerified*. - `ValidAndVerified` either verifies the header using the trusting period or falls back to sequential verification - - If [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) holds, eventually every + - If [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) holds, eventually every header will be verified and core verification **terminates successfully**. 
- successful termination depends on the age of *lightStore.LatestVerified* (for instance, initially on the age of *trustedHeader*) and the @@ -887,7 +887,7 @@ have the state *StateVerified*. ## Liveness Scenarios -The liveness argument above assumes [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) +The liveness argument above assumes [**[LCV-INV-TP.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-inv-tp1) which requires that there is a header that does not expire before the target height is reached. Here we discuss scenarios to ensure this. @@ -1150,7 +1150,7 @@ func Main (primary PeerID, lightStore LightStore, targetHeight Height) [RPC]: https://docs.cometbft.com/v0.34/rpc/ -[block]: https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md +[block]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/data_structures.md [CMBC-SEQ-link]: #cmbc-seq1 [CMBC-CorrFull-link]: #cmbc-corr-full1 @@ -1161,8 +1161,8 @@ func Main (primary PeerID, lightStore LightStore, targetHeight Height) [CMBC-VAL-COMMIT-link]: #cmbc-val-commit1 [lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md -[fork-detector]: https://github.com/cometbft/cometbft/tree/main/spec/light-client/detection -[fullnode]: https://github.com/cometbft/cometbft/blob/main/spec/blockchain +[fork-detector]: https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/detection +[fullnode]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/blockchain [ibc-rs]:https://github.com/informalsystems/ibc-rs diff --git a/spec/light-client/verification/verification_002_draft.md b/spec/light-client/verification/verification_002_draft.md index 4ae3731f825..9bbccba8f75 100644 --- a/spec/light-client/verification/verification_002_draft.md +++ b/spec/light-client/verification/verification_002_draft.md @@ -420,9 +420,9 @@ must eventually terminate. > These definitions imply that if the primary is faulty, a header may or > may not be added to *LightStore*. In any case, -> [**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) must hold. -> The invariant [**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) and the liveness -> requirement [**[LCV-DIST-LIVE.2]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-life) +> [**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) must hold. +> The invariant [**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) and the liveness +> requirement [**[LCV-DIST-LIVE.2]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-life) > allow that verified headers are added to *LightStore* whose > height was not passed > to the verifier (e.g., intermediate headers used in bisection; see below). @@ -439,16 +439,16 @@ must eventually terminate. This specification provides a partial solution to the sequential specification. 
The *Verifier* solves the invariant of the sequential part -[**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) => [**[LCV-SEQ-SAFE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) +[**[LCV-DIST-SAFE.2]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe2) => [**[LCV-SEQ-SAFE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) In the case the primary is correct, and *root* is a recent header in *LightStore*, the verifier satisfies the liveness requirements. ⋀ *primary is correct* ⋀ *root.header.Time* > *now* - *trustingPeriod* -⋀ [**[LCV-A-Comm.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-a-comm) ⋀ ( +⋀ [**[LCV-A-Comm.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-a-comm) ⋀ ( ( [**[CMBC-CorrFull.1]**][CMBC-CorrFull-link] ⋀ - [**[LCV-DIST-LIVE.2]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-live2) ) - ⟹ [**[LCV-SEQ-LIVE.1]**](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_001_published.md#lcv-dist-live1) + [**[LCV-DIST-LIVE.2]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-live2) ) + ⟹ [**[LCV-SEQ-LIVE.1]**](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-live1) ) # Part IV - Light Client Verification Protocol @@ -612,7 +612,7 @@ func (ls LightStore) TraceTo(lightBlock LightBlock) (LightBlock, LightStore) - returns a **trusted** lightblock `root` from the lightstore with a height less than `lightBlock` - returns a lightstore that contains lightblocks that constitute a - [verification trace](https://github.com/cometbft/cometbft/tree/main/spec/light-client/detection) from + [verification trace](https://github.com/cometbft/cometbft/tree/v0.38.x/spec/light-client/detection) from `root` to `lightBlock` (including `lightBlock`) ### Inputs @@ -1035,7 +1035,7 @@ func Backwards (primary PeerID, root LightBlock, targetHeight Height) [RPC]: https://docs.cometbft.com/v0.34/rpc/ -[block]: https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md +[block]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/core/data_structures.md [CMBC-HEADER-link]: #cmbc-header1 [CMBC-SEQ-link]: #cmbc-seq1 @@ -1047,8 +1047,8 @@ func Backwards (primary PeerID, root LightBlock, targetHeight Height) [CMBC-VAL-COMMIT-link]: #cmbc-val-commit1 [lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md -[attack-detector]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/detection/detection_001_reviewed.md -[fullnode]: https://github.com/cometbft/cometbft/tree/main/spec/core +[attack-detector]: https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/detection/detection_001_reviewed.md +[fullnode]: https://github.com/cometbft/cometbft/tree/v0.38.x/spec/core [ibc-rs]:https://github.com/informalsystems/ibc-rs diff --git a/spec/p2p/README.md b/spec/p2p/README.md 
new file mode 100644
index 00000000000..29efd8ecadf
--- /dev/null
+++ b/spec/p2p/README.md
@@ -0,0 +1,46 @@
+---
+order: 1
+parent:
+  title: P2P
+  order: 6
+---
+
+# Peer-to-Peer Communication
+
+A CometBFT network is composed of multiple CometBFT instances, hereafter called
+`nodes`, that interact by exchanging messages.
+
+The CometBFT protocols are designed under the assumption of a partially-connected network model.
+This means that a node is not assumed to be directly connected to every other
+node in the network.
+Instead, each node is directly connected to only a subset of other nodes,
+hereafter called its `peers`.
+
+The peer-to-peer (p2p) communication layer is then the component of CometBFT that:
+
+1. establishes connections between nodes in a CometBFT network
+2. manages the communication between a node and the connected peers
+3. intermediates the exchange of messages between peers in CometBFT protocols
+
+The specification of the p2p layer is a work in progress,
+tracked by [issue #19](https://github.com/cometbft/cometbft/issues/19).
+The current content is organized as follows:
+
+- [`implementation`](./implementation/README.md): documents the current state
+  of the implementation of the p2p layer, covering the main components of the
+  `p2p` package. The documentation covers, in a fairly comprehensive way,
+  items 1 and 2 from the list above.
+- [`reactor-api`](./reactor-api/README.md): specifies the API offered by the
+  p2p layer to the protocol layer, through the `Reactor` abstraction.
+  This is a high-level specification (i.e., it should not be implementation-specific)
+  of the p2p layer API, covering item 3 from the list above.
+- [`legacy-docs`](./legacy-docs/): We keep older documentation in
+  the `legacy-docs` directory, as, overall, it still contains useful information.
+  However, part of this content is redundant,
+  being more comprehensively covered in more recent documents,
+  and some implementation details might be outdated
+  (see [issue #981](https://github.com/cometbft/cometbft/issues/981)).
+
+In addition to this content, some unfinished, work-in-progress, and auxiliary
+material can be found in the
+[knowledge-base](https://github.com/cometbft/knowledge-base/tree/main/p2p) repository.
diff --git a/spec/p2p/images/p2p-reactors.png b/spec/p2p/images/p2p-reactors.png
new file mode 100644
index 00000000000..5515976c10c
Binary files /dev/null and b/spec/p2p/images/p2p-reactors.png differ
diff --git a/spec/p2p/v0.34/img/p2p_state.png b/spec/p2p/images/p2p_state.png
similarity index 100%
rename from spec/p2p/v0.34/img/p2p_state.png
rename to spec/p2p/images/p2p_state.png
diff --git a/spec/p2p/implementation/README.md b/spec/p2p/implementation/README.md
new file mode 100644
index 00000000000..9011536b9b8
--- /dev/null
+++ b/spec/p2p/implementation/README.md
@@ -0,0 +1,43 @@
+---
+order: 1
+title: Implementation
+---
+
+# Implementation of the p2p layer
+
+This section documents the implementation of the peer-to-peer (p2p)
+communication layer in CometBFT.
+
+The documentation was [produced](https://github.com/tendermint/tendermint/pull/9348)
+using the `v0.34.*` releases
+and the branch [`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x)
+of this repository as reference.
+As there were no substantial changes in the p2p implementation, the
+documentation also applies to the releases `v0.37.*` and `v0.38.*` [^v35].
+
+[^v35]: The releases `v0.35.*` and `v0.36.*`, which included a major
+  refactoring of the p2p layer implementation, were [discontinued][v35postmortem].
+
+[v35postmortem]: https://interchain-io.medium.com/discontinuing-tendermint-v0-35-a-postmortem-on-the-new-networking-layer-3696c811dabc
+
+## Contents
+
+The documentation follows the organization of the
+[`p2p` package](https://github.com/cometbft/cometbft/tree/v0.34.x/p2p),
+which implements the following abstractions:
+
+- [Transport](./transport.md): establishes secure and authenticated
+  connections with peers;
+- [Switch](./switch.md): responsible for dialing peers and accepting
+  connections from peers, for managing established connections, and for
+  routing messages between the reactors and peers,
+  that is, between local and remote instances of the CometBFT protocols;
+- [PEX Reactor](./pex.md): due to the several roles of this component, the
+  documentation is split into several parts:
+  - [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer addresses, thus implementing a peer discovery service;
+  - [Address Book](./addressbook.md): stores discovered peer addresses and
+    quality metrics associated with peers with which the node has interacted;
+  - [Peer Manager](./peer_manager.md): defines when and which peers a node
+    should dial in order to establish outbound connections;
+- [Types](./types.md) and [Configuration](./configuration.md) provide a list of
+  existing types and configuration parameters used by the p2p package.
diff --git a/spec/p2p/v0.34/addressbook.md b/spec/p2p/implementation/addressbook.md
similarity index 99%
rename from spec/p2p/v0.34/addressbook.md
rename to spec/p2p/implementation/addressbook.md
index 8fd2cc3a2c8..26b9504214b 100644
--- a/spec/p2p/v0.34/addressbook.md
+++ b/spec/p2p/implementation/addressbook.md
@@ -303,7 +303,7 @@ The `MarkBad` method marks a peer as bad and bans it for a period of time.
This method is only invoked within the PEX reactor, with a banning time of 24 hours, for the following reasons:
-- A peer misbehaves in the [PEX protocol](pex-protocol.md#misbehavior)
+- A peer misbehaves in the [PEX protocol](./pex-protocol.md#misbehavior)
- When the `maxAttemptsToDial` limit (`16`) is reached for a peer
- If an `ErrSwitchAuthenticationFailure` error is returned when dialing a peer
diff --git a/spec/p2p/v0.34/configuration.md b/spec/p2p/implementation/configuration.md
similarity index 61%
rename from spec/p2p/v0.34/configuration.md
rename to spec/p2p/implementation/configuration.md
index 53ac3183db5..9f172c22c81 100644
--- a/spec/p2p/v0.34/configuration.md
+++ b/spec/p2p/implementation/configuration.md
@@ -6,41 +6,39 @@ This document contains configurable parameters a node operator can use to tune t
| --- | --- | ---|
| ListenAddress | "tcp://0.0.0.0:26656" | Address to listen for incoming connections (0.0.0.0:0 means any interface, any port) |
| ExternalAddress | "" | Address to advertise to peers for them to dial |
-| [Seeds](pex-protocol.md#seed-nodes) | empty | Comma separated list of seed nodes to connect to (ID@host:port )|
-| [Persistent peers](peer_manager.md#persistent-peers) | empty | Comma separated list of nodes to keep persistent connections to (ID@host:port ) |
-| UPNP | false | UPNP port forwarding enabled |
-| [AddrBook](addressbook.md) | defaultAddrBookPath | Path do address book |
+| [Seeds](./pex-protocol.md#seed-nodes) | empty | Comma-separated list of seed nodes to connect to (ID@host:port) |
+| [Persistent peers](./peer_manager.md#persistent-peers) | empty | Comma-separated list of nodes to keep persistent connections to (ID@host:port) |
+| [AddrBook](./addressbook.md) | defaultAddrBookPath | Path to address book |
| AddrBookStrict | true | Set true for strict address routability rules and false for private or local networks |
-| [MaxNumInboundPeers](switch.md#accepting-peers) | 40 | Maximum number of inbound peers |
-| [MaxNumOutboundPeers](peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers |
-| [UnconditionalPeers](switch.md#accepting-peers) | empty | These are IDs of the peers which are allowed to be (re)connected as both inbound or outbound regardless of whether the node reached `max_num_inbound_peers` or `max_num_outbound_peers` or not. |
+| [MaxNumInboundPeers](./switch.md#accepting-peers) | 40 | Maximum number of inbound peers |
+| [MaxNumOutboundPeers](./peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers |
+| [UnconditionalPeers](./switch.md#accepting-peers) | empty | IDs of peers that are allowed to be (re)connected, as either inbound or outbound, regardless of whether the node has reached `max_num_inbound_peers` or `max_num_outbound_peers`. |
| PersistentPeersMaxDialPeriod| 0 * time.Second | Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) |
| FlushThrottleTimeout |100 * time.Millisecond| Time to wait before flushing messages out on the connection |
| MaxPacketMsgPayloadSize | 1024 | Maximum size of a message packet payload, in bytes |
| SendRate | 5120000 (5 mB/s) | Rate at which packets can be sent, in bytes/second |
| RecvRate | 5120000 (5 mB/s) | Rate at which packets can be received, in bytes/second|
-| [PexReactor](pex.md) | true | Set true to enable the peer-exchange reactor |
+| [PexReactor](./pex.md) | true | Set true to enable the peer-exchange reactor |
| SeedMode | false | Seed mode, in which the node constantly crawls the network and looks for peers. Does not work if the peer-exchange reactor is disabled. |
| PrivatePeerIDs | empty | Comma separated list of peer IDs that we do not add to the address book or gossip to other peers. They stay private to us. |
| AllowDuplicateIP | false | Toggle to disable guard against peers connecting from the same IP.|
-| [HandshakeTimeout](transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers |
-| [DialTimeout](switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer |
+| [HandshakeTimeout](./transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers |
+| [DialTimeout](./switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer |

These parameters can be set using the `$CMTHOME/config/config.toml` file. A subset of them can also be changed via command line using the following command line flags:

-| Parameter | Flag| Example|
-| --- | --- | ---|
+| Parameter | Flag | Example |
+| --- | --- | --- |
| Listen address| `p2p.laddr` | "tcp://0.0.0.0:26656" |
| Seed nodes | `p2p.seeds` | `--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` |
| Persistent peers | `p2p.persistent_peers` | `--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` |
| Unconditional peers | `p2p.unconditional_peer_ids` | `--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
- | UPNP | `p2p.upnp` | `--p2p.upnp` |
- | PexReactor | `p2p.pex` | `--p2p.pex` |
- | Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` |
- | Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
+| PexReactor | `p2p.pex` | `--p2p.pex` |
+| Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` |
+| Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |

- **Note on persistent peers**
+ **Note on persistent peers**

If `persistent_peers_max_dial_period` is set greater than zero, the pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period`
diff --git a/spec/p2p/v0.34/peer_manager.md b/spec/p2p/implementation/peer_manager.md
similarity index 87%
rename from spec/p2p/v0.34/peer_manager.md
rename to spec/p2p/implementation/peer_manager.md
index ba2e75ef323..5dfc14b2fc0 100644
--- a/spec/p2p/v0.34/peer_manager.md
+++ b/spec/p2p/implementation/peer_manager.md
@@ -85,16 +85,9 @@ To avoid this delay, which can be particularly relevant when the node has no
peers, a node immediately attempts to dial peer addresses when
they are received from a peer that is locally configured as a seed node.

-> FIXME: The current logic was introduced in [#3762](https://github.com/tendermint/tendermint/pull/3762).
-> Although it fix the issue, the delay between receiving an address and dialing
-> the peer, it does not impose and limit on how many addresses are dialed in this
-> scenario.
-> So, all addresses received from a seed node are dialed, regardless of the
-> current number of outbound peers, the number of dialing routines, or the
-> `MaxNumOutboundPeers` parameter.
->
-> Issue [#9548](https://github.com/tendermint/tendermint/issues/9548) was
-> created to handle this situation.
+> This was implemented in a rough way, leading to the inconsistencies described in
+> this [issue](https://github.com/cometbft/cometbft/issues/486),
+> which were fixed by this [PR](https://github.com/cometbft/cometbft/pull/3360).

### First round

@@ -124,19 +117,19 @@ This is not done in the p2p package, but it is part of the procedure to set up a

The picture below is a first attempt at illustrating the life cycle of an outbound peer:

-
+

A peer can be in the following states:

- Candidate peers: peer addresses stored in the address book, that can be retrieved
  via the [`PickAddress`](./addressbook.md#pick-address) method
-- [Dialing](switch.md#dialing-peers): peer addresses that are currently being
+- [Dialing](./switch.md#dialing-peers): peer addresses that are currently being
  dialed. This state exists to ensure that a single dialing routine exists per peer.
-- [Reconnecting](switch.md#reconnect-to-peer): persistent peers to which a node
+- [Reconnecting](./switch.md#reconnect-to-peer): persistent peers to which a node
  is currently reconnecting, as a previous connection attempt has failed.
- Connected peers: peers that a node has successfully dialed, added as outbound peers.
-- [Bad peers](addressbook.md#bad-peers): peers marked as bad in the address
-  book due to exhibited [misbehavior](pex-protocol.md#misbehavior).
+- [Bad peers](./addressbook.md#bad-peers): peers marked as bad in the address
+  book due to exhibited [misbehavior](./pex-protocol.md#misbehavior).
  Peers can be reinstated after being marked as bad.

## Pending of documentation
diff --git a/spec/p2p/v0.34/pex-protocol.md b/spec/p2p/implementation/pex-protocol.md
similarity index 98%
rename from spec/p2p/v0.34/pex-protocol.md
rename to spec/p2p/implementation/pex-protocol.md
index ed88993026c..760a56bd9dc 100644
--- a/spec/p2p/v0.34/pex-protocol.md
+++ b/spec/p2p/implementation/pex-protocol.md
@@ -78,7 +78,7 @@ Sending a PEX response to a peer that has not requested peer addresses is also
considered a misbehavior.
So, if a PEX response is received from a peer that is not registered in the
`requestsSent` set, an `ErrUnsolicitedList` error is produced.
-This leads the peer to be disconnected and [marked as a bad peer](addressbook.md#bad-peers).
+This leads the peer to be disconnected and [marked as a bad peer](./addressbook.md#bad-peers).

## Providing Addresses

@@ -102,7 +102,7 @@ The `receiveRequest` method is responsible for verifying this condition.
The node keeps a `lastReceivedRequests` map with the time of the last PEX
request received from every peer.
If the interval between successive requests is less than the minimum accepted
-one, the peer is disconnected and [marked as a bad peer](addressbook.md#bad-peers).
+one, the peer is disconnected and [marked as a bad peer](./addressbook.md#bad-peers).
An exception is made for the first two PEX requests received from a peer.
> The probable reason is that, when a new peer is added, the two conditions for
@@ -150,7 +150,7 @@ peers, the seed node sends a PEX request.
Dialing a selected peer address can fail for multiple reasons.
The seed node might have attempted to dial the peer too many times.
-In this case, the peer address is marked as [bad in the address book](addressbook.md#bad-peers).
+In this case, the peer address is marked as [bad in the address book](./addressbook.md#bad-peers).
The seed node might have attempted to dial the peer recently, without success,
and the exponential `backoffDuration` has not yet passed.
Or the current connection attempt might fail, which is registered in the address book.
diff --git a/spec/p2p/v0.34/pex.md b/spec/p2p/implementation/pex.md
similarity index 100%
rename from spec/p2p/v0.34/pex.md
rename to spec/p2p/implementation/pex.md
diff --git a/spec/p2p/v0.34/switch.md b/spec/p2p/implementation/switch.md
similarity index 95%
rename from spec/p2p/v0.34/switch.md
rename to spec/p2p/implementation/switch.md
index 1d50108f2b9..4497fef96e2 100644
--- a/spec/p2p/v0.34/switch.md
+++ b/spec/p2p/implementation/switch.md
@@ -50,11 +50,11 @@ The `DialPeersAsync` method receives a list of peer addresses (strings) and
dials all of them in parallel.
It is invoked in two situations:

-- In the [setup](https://github.com/cometbft/cometbft/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L985) of a node, to establish connections with every configured
-  persistent peer
+- In the [setup](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L987)
+of a node, to establish connections with every configured persistent peer
- In the RPC package, to implement two unsafe RPC commands, not used in production:
-  [`DialSeeds`](https://github.com/cometbft/cometbft/blob/29c5a062d23aaef653f11195db55c45cd9e02715/rpc/core/net.go#L47) and
-  [`DialPeers`](https://github.com/cometbft/cometbft/blob/29c5a062d23aaef653f11195db55c45cd9e02715/rpc/core/net.go#L87)
+  [`DialSeeds`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L47) and
+  [`DialPeers`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L87)

The received list of peer addresses to dial is parsed into `NetAddress`
instances. In case of parsing errors, the method returns. An exception is made for
diff --git a/spec/p2p/v0.34/transport.md b/spec/p2p/implementation/transport.md
similarity index 95%
rename from spec/p2p/v0.34/transport.md
rename to spec/p2p/implementation/transport.md
index 457fa1e1cbb..20d4db87a43 100644
--- a/spec/p2p/v0.34/transport.md
+++ b/spec/p2p/implementation/transport.md
@@ -43,9 +43,9 @@ The `NetAddress` method exports the listen address configured for the
transport.

The maximum number of simultaneous incoming connections accepted by the listener
is bound to `MaxNumInboundPeer` plus the configured number of unconditional peers,
using the `MultiplexTransportMaxIncomingConnections` option,
-in the node [initialization](https://github.com/cometbft/cometbft/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L563).
+in the node [initialization](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L563).

-This method is called when a node is [started](https://github.com/cometbft/cometbft/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L972).
+This method is called when a node is [started](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L974).
In case of errors, the `acceptPeers` routine is not started and the error is returned.
## Accept @@ -191,7 +191,7 @@ an `ErrRejected` error with reason `isIncompatible` is returned. The `Close` method closes the TCP listener created by the `Listen` method, and sends a signal for interrupting the `acceptPeers` routine. -This method is called when a node is [stopped](https://github.com/cometbft/cometbft/blob/46badfabd9d5491c78283a0ecdeb695e21785508/node/node.go#L1019). +This method is called when a node is [stopped](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L1023). ## Cleanup @@ -216,7 +216,7 @@ For this reason, this method is not invoked with a started transport. > Note that the default list of supported channel IDs, including the default reactors, > is provided to the transport as its original `NodeInfo` record. -[peer-sts]: https://github.com/cometbft/cometbft/blob/main/spec/p2p/peer.md#authenticated-encryption-handshake -[peer-handshake]:https://github.com/cometbft/cometbft/blob/main/spec/p2p/peer.md#cometbft-version-handshake +[peer-sts]: ../legacy-docs/peer.md#authenticated-encryption-handshake +[peer-handshake]: ../legacy-docs/peer.md#cometbft-version-handshake [sts-paper]: https://link.springer.com/article/10.1007/BF00124891 [sts-paper-pdf]: https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf diff --git a/spec/p2p/v0.34/types.md b/spec/p2p/implementation/types.md similarity index 97% rename from spec/p2p/v0.34/types.md rename to spec/p2p/implementation/types.md index 6d71da03fb7..cef2632936b 100644 --- a/spec/p2p/v0.34/types.md +++ b/spec/p2p/implementation/types.md @@ -231,9 +231,3 @@ Go documentation of `Metric` type: > // See cometbft/docs/architecture/adr-006-trust-metric.md for details Not imported by any other CometBFT source file. - -## Package `p2p.upnp` - -This package implementation was taken from "taipei-torrent". - -It is used by the `probe-upnp` command of the CometBFT binary. diff --git a/spec/p2p/legacy-docs/README.md b/spec/p2p/legacy-docs/README.md new file mode 100644 index 00000000000..5206ccbefd4 --- /dev/null +++ b/spec/p2p/legacy-docs/README.md @@ -0,0 +1,16 @@ +--- +order: 1 +title: Legacy Docs +--- + +# Legacy Docs + +This section contains useful information. However, part of this content is redundant, being more comprehensively covered +in more recent documents, and some implementation details might be outdated +(see issue [#981](https://github.com/cometbft/cometbft/issues/981)). + +- [Messages](./messages) +- [P2P Config](./config.md) +- [P2P Multiplex Connection](./connection.md) +- [Peer Discovery](./node.md) +- [Peers](./peer.md) diff --git a/spec/p2p/config.md b/spec/p2p/legacy-docs/config.md similarity index 83% rename from spec/p2p/config.md rename to spec/p2p/legacy-docs/config.md index 4b191e821a4..34383e62c9f 100644 --- a/spec/p2p/config.md +++ b/spec/p2p/legacy-docs/config.md @@ -1,3 +1,7 @@ +--- +order: 1 +--- + # P2P Config Here we describe configuration options around the Peer Exchange. @@ -17,14 +21,6 @@ and upon incoming connection shares some peers and disconnects. Dials these seeds when we need more peers. They should return a list of peers and then disconnect. If we already have enough peers in the address book, we may never need to dial them. -## Bootstrap Peers - -`--p2p.bootstrap_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` - -A list of peers to be added to the addressbook upon startup to ensure that the node has some peers to initially dial. -Unlike persistent peers, these addresses don't have any extra privileges. 
The node may not necessarily connect on redial
-these peers.
-
## Persistent Peers

`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
diff --git a/spec/p2p/connection.md b/spec/p2p/legacy-docs/connection.md
similarity index 99%
rename from spec/p2p/connection.md
rename to spec/p2p/legacy-docs/connection.md
index 158d9d4fa5b..eb255a44153 100644
--- a/spec/p2p/connection.md
+++ b/spec/p2p/legacy-docs/connection.md
@@ -1,3 +1,7 @@
+---
+order: 1
+---
+
# P2P Multiplex Connection

## MConnection
diff --git a/spec/p2p/messages/README.md b/spec/p2p/legacy-docs/messages/README.md
similarity index 100%
rename from spec/p2p/messages/README.md
rename to spec/p2p/legacy-docs/messages/README.md
diff --git a/spec/p2p/messages/block-sync.md b/spec/p2p/legacy-docs/messages/block-sync.md
similarity index 92%
rename from spec/p2p/messages/block-sync.md
rename to spec/p2p/legacy-docs/messages/block-sync.md
index b00c3062c57..49afcc417ed 100644
--- a/spec/p2p/messages/block-sync.md
+++ b/spec/p2p/legacy-docs/messages/block-sync.md
@@ -39,8 +39,8 @@ It also contains an extended commit _iff_ vote extensions are enabled at the blo

| Name | Type | Description | Field Number |
|-----------|----------------------------------------------------------------|---------------------------------|--------------|
-| Block | [Block](../../core/data_structures.md#block) | Requested Block | 1 |
-| ExtCommit | [ExtendedCommit](../../core/data_structures.md#extendedcommit) | Sender's LastCommit information | 2 |
+| Block | [Block](../../../core/data_structures.md#block) | Requested Block | 1 |
+| ExtCommit | [ExtendedCommit](../../../core/data_structures.md#extendedcommit) | Sender's LastCommit information | 2 |

### StatusRequest
diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/legacy-docs/messages/consensus.md
similarity index 85%
rename from spec/p2p/messages/consensus.md
rename to spec/p2p/legacy-docs/messages/consensus.md
index ebb7fab7fca..f65c68c6ff7 100644
--- a/spec/p2p/messages/consensus.md
+++ b/spec/p2p/legacy-docs/messages/consensus.md
@@ -24,21 +24,18 @@ next block in the blockchain should be.

| Name | Type | Description | Field Number |
|----------|----------------------------------------------------|----------------------------------------|--------------|
-| proposal | [Proposal](../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1 |
+| proposal | [Proposal](../../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1 |

### Vote

Vote is sent to vote for some block (or to inform others that a process does not vote in the
-current round). Vote is defined in the
-[Blockchain](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#blockidd)
-section and contains validator's
-information (validator address and index), height and round for which the vote is sent, vote type,
-blockID if process vote for some block (`nil` otherwise) and a timestamp when the vote is sent. The
-message is signed by the validator private key.
+current round). Vote contains the validator's information (validator address and index), the height and
+round for which the vote is sent, the vote type, the blockID if the process voted for a block (`nil` otherwise),
+and a timestamp when the vote is sent. The message is signed by the validator's private key.
| Name | Type | Description | Field Number | |------|--------------------------------------------|---------------------------|--------------| -| vote | [Vote](../../core/data_structures.md#vote) | Vote for a proposed Block | 1 | +| vote | [Vote](../../../core/data_structures.md#vote) | Vote for a proposed Block | 1 | ### BlockPart @@ -49,7 +46,7 @@ and the block part. |--------|--------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block. | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| part | [Part](../../core/data_structures.md#part) | A part of the block. | 3 | +| part | [Part](../../../core/data_structures.md#part) | A part of the block. | 3 | ### NewRoundStep @@ -79,7 +76,7 @@ In case the block is also committed, then IsCommit flag is set to true. |-----------------------|--------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| block_part_set_header | [PartSetHeader](../../core/data_structures.md#partsetheader) | | 3 | +| block_part_set_header | [PartSetHeader](../../../core/data_structures.md#partsetheader) | | 3 | | block_parts | int32 | | 4 | | is_commit | bool | | 5 | @@ -104,7 +101,7 @@ round, vote type and the index of the validator that is the originator of the co |--------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | | index | int32 | | 4 | ### VoteSetMaj23 @@ -116,7 +113,7 @@ It contains height, round, vote type and the BlockID. |--------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | ### VoteSetBits @@ -128,8 +125,8 @@ the votes a process has. |----------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | -| block_id | [BlockID](../../core/data_structures.md#blockid) | | 4 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | +| block_id | [BlockID](../../../core/data_structures.md#blockid) | | 4 | | votes | BitArray | Round of voting to finalize the block. | 5 | ### Message diff --git a/spec/p2p/messages/evidence.md b/spec/p2p/legacy-docs/messages/evidence.md similarity index 78% rename from spec/p2p/messages/evidence.md rename to spec/p2p/legacy-docs/messages/evidence.md index 34fc40a9155..7db104b3477 100644 --- a/spec/p2p/messages/evidence.md +++ b/spec/p2p/legacy-docs/messages/evidence.md @@ -16,8 +16,8 @@ Evidence has one channel. 
The channel identifier is listed below.

### EvidenceList

-EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places, as a p2p message and within the block [block](../../core/data_structures.md#block) as well.
+EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places: as a p2p message, and within a [block](../../../core/data_structures.md#block).

| Name | Type | Description | Field Number |
|----------|-------------------------------------------------------------|------------------------|--------------|
-| evidence | repeated [Evidence](../../core/data_structures.md#evidence) | List of valid evidence | 1 |
+| evidence | repeated [Evidence](../../../core/data_structures.md#evidence) | List of valid evidence | 1 |
diff --git a/spec/p2p/messages/mempool.md b/spec/p2p/legacy-docs/messages/mempool.md
similarity index 100%
rename from spec/p2p/messages/mempool.md
rename to spec/p2p/legacy-docs/messages/mempool.md
diff --git a/spec/p2p/messages/pex.md b/spec/p2p/legacy-docs/messages/pex.md
similarity index 100%
rename from spec/p2p/messages/pex.md
rename to spec/p2p/legacy-docs/messages/pex.md
diff --git a/spec/p2p/messages/state-sync.md b/spec/p2p/legacy-docs/messages/state-sync.md
similarity index 94%
rename from spec/p2p/messages/state-sync.md
rename to spec/p2p/legacy-docs/messages/state-sync.md
index cfc958e08da..30657ecbb00 100644
--- a/spec/p2p/messages/state-sync.md
+++ b/spec/p2p/legacy-docs/messages/state-sync.md
@@ -28,7 +28,7 @@ available snapshots:

### SnapShotResponse

The receiver will query the local ABCI application via `ListSnapshots`, and send a message
-containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots: and stored at the application layer. When a peer is starting it will request snapshots.
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots stored at the application layer. When a peer is starting, it will request snapshots.

| Name | Type | Description | Field Number |
|----------|--------|-----------------------------------------------------------|--------------|
@@ -89,9 +89,9 @@ if necessary. The light block at the height of the snapshot will be used to veri

| Name | Type | Description | Field Number |
|---------------|---------------------------------------------------------|--------------------------------------|--------------|
-| light_block | [LightBlock](../../core/data_structures.md#lightblock) | Light block at the height requested | 1 |
+| light_block | [LightBlock](../../../core/data_structures.md#lightblock) | Light block at the height requested | 1 |

-State sync will use [light client verification](../../../spec/light-client/verification/README.md) to verify
+State sync will use [light client verification](../../../light-client/verification/README.md) to verify
the light blocks. If no state sync is in progress (i.e.
during normal operation), any unsolicited response messages
@@ -113,7 +113,7 @@ A reciever to the request will use the state store to fetch the consensus params
| Name | Type | Description | Field Number |
|----------|--------|---------------------------------|--------------|
| height | uint64 | Height of the consensus params | 1 |
-| consensus_params | [ConsensusParams](../../core/data_structures.md#ConsensusParams) | Consensus params at the height requested | 2 |
+| consensus_params | [ConsensusParams](../../../core/data_structures.md#consensusparams) | Consensus params at the height requested | 2 |

### Message
diff --git a/spec/p2p/node.md b/spec/p2p/legacy-docs/node.md
similarity index 99%
rename from spec/p2p/node.md
rename to spec/p2p/legacy-docs/node.md
index 1db0cdb6f80..492fb56bcb5 100644
--- a/spec/p2p/node.md
+++ b/spec/p2p/legacy-docs/node.md
@@ -1,3 +1,7 @@
+---
+order: 1
+---
+
# Peer Discovery

A CometBFT P2P network has different kinds of nodes with different requirements for connectivity to one another.
diff --git a/spec/p2p/peer.md b/spec/p2p/legacy-docs/peer.md
similarity index 99%
rename from spec/p2p/peer.md
rename to spec/p2p/legacy-docs/peer.md
index 995babaf879..69217a626ad 100644
--- a/spec/p2p/peer.md
+++ b/spec/p2p/legacy-docs/peer.md
@@ -1,3 +1,7 @@
+---
+order: 1
+---
+
# Peers

This document explains how CometBFT Peers are identified and how they connect to one another.
diff --git a/spec/p2p/reactor-api/README.md b/spec/p2p/reactor-api/README.md
new file mode 100644
index 00000000000..b3bcabd1106
--- /dev/null
+++ b/spec/p2p/reactor-api/README.md
@@ -0,0 +1,47 @@
+---
+order: 1
+---
+
+# Reactors
+
+Reactor is the generic name for a component that employs the p2p communication layer.
+
+This section documents the interaction of the p2p communication layer with the
+reactors.
+The diagram below summarizes this interaction, namely the **northbound interface**
+of the p2p communication layer, representing some relevant event flows:
+
+
+
+Each of the protocols running in a CometBFT node implements a reactor and registers
+the implementation with the p2p layer.
+The p2p layer provides network events to the registered reactors, the main
+two being new connections with peers and received messages.
+The reactors provide to the p2p layer messages to be sent to
+peers and commands to control the operation of the p2p layer.
+
+It is worth noting that the components depicted in the diagram below run
+multiple routines and that the illustrated actions happen in parallel.
+For instance, the connection establishment routines run in parallel, invoking
+the depicted `AddPeer` method concurrently.
+Once a connection is fully established, each `Peer` instance runs a send and a
+receive routine.
+The send routine collects messages from multiple reactors to a peer, packaging
+them into raw messages which are transmitted to the peer.
+The receive routine processes incoming messages and forwards them to the
+destination reactors, invoking the depicted `Receive` methods.
+In addition, the reactors run multiple routines for interacting
+with the peers (for example, to send messages to them) or with the `Switch`.
+
+The remainder of the documentation is organized as follows:
+
+- [Reactor API](./reactor.md): documents the [`p2p.Reactor`][reactor-interface]
+  interface and specifies the behaviour of the p2p layer when interacting with
+  a reactor.
+  In other words, the interaction of the p2p layer with the protocol layer (bottom-up).
+
+- [P2P API](./p2p-api.md): documents the interface provided by the p2p
+  layer to the reactors, through the `Switch` and `Peer` abstractions.
+  In other words, the interaction of the protocol layer with the p2p layer (top-down).
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
diff --git a/spec/p2p/reactor-api/p2p-api.md b/spec/p2p/reactor-api/p2p-api.md
new file mode 100644
index 00000000000..ad1fbff3110
--- /dev/null
+++ b/spec/p2p/reactor-api/p2p-api.md
@@ -0,0 +1,315 @@
+---
+order: 3
+---
+
+# API for Reactors
+
+This document describes the API provided by the p2p layer to the protocol
+layer, namely to the registered reactors.
+
+This API consists of two interfaces: the one provided by the `Switch` instance,
+and the ones provided by multiple `Peer` instances, one per connected peer.
+The `Switch` instance is provided to every reactor as part of the reactor's
+[registration procedure][reactor-registration].
+The multiple `Peer` instances are provided to every registered reactor whenever
+a [new connection with a peer][reactor-addpeer] is established.
+
+> **Note**
+>
+> The practical reasons that led to the interface being provided in two parts,
+> the `Switch` and `Peer` instances, are discussed in more detail in the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/switch-peer.md).
+
+## `Switch` API
+
+The [`Switch`][switch-type] is the central component of the p2p layer
+implementation. It manages all the reactors running in a node and keeps track
+of the connections with peers.
+The table below summarizes the interaction of the standard reactors with the `Switch`:
+
+| `Switch` API method                        | consensus | block sync | state sync | mempool | evidence  | PEX   |
+|--------------------------------------------|-----------|------------|------------|---------|-----------|-------|
+| `Peers() IPeerSet`                         | x         | x          |            |         |           | x     |
+| `NumPeers() (int, int, int)`               |           | x          |            |         |           | x     |
+| `Broadcast(Envelope) chan bool`            | x         | x          | x          |         |           |       |
+| `MarkPeerAsGood(Peer)`                     | x         |            |            |         |           |       |
+| `StopPeerForError(Peer, interface{})`      | x         | x          | x          | x       | x         | x     |
+| `StopPeerGracefully(Peer)`                 |           |            |            |         |           | x     |
+| `Reactor(string) Reactor`                  |           | x          |            |         |           |       |
+
+The above list is not exhaustive as it does not include all the `Switch` methods
+invoked by the PEX reactor, a special component that should be considered part
+of the p2p layer. This document does not cover the operation of the PEX reactor
+as a connection manager.
+
+### Peers State
+
+The first two methods in the switch API allow reactors to query the state of
+the p2p layer: the set of connected peers.
+
+    func (sw *Switch) Peers() IPeerSet
+
+The `Peers()` method returns the current set of connected peers.
+The returned `IPeerSet` is an immutable concurrency-safe copy of this set.
+Observe that the `Peer` handlers returned by this method were previously
+[added to the reactor][reactor-addpeer] via the `InitPeer(Peer)` method,
+but not yet removed via the `RemovePeer(Peer)` method.
+Thus, a priori, reactors should already have this information.
+
+    func (sw *Switch) NumPeers() (outbound, inbound, dialing int)
+
+The `NumPeers()` method returns the current number of connected peers,
+distinguished between `outbound` and `inbound` peers.
+An `outbound` peer is a peer the node has dialed, while an `inbound` peer is
+a peer the node has accepted a connection from.
+
+The third field `dialing` reports the number of peers to which the node is
+currently attempting to connect, that is, peers that are not (yet) connected.
+
+> **Note**
+>
+> The third field returned by `NumPeers()`, the number of peers in `dialing`
+> state, is not information that should concern the protocol layer.
+> In fact, with the exception of the PEX reactor, which can be considered part
+> of the p2p layer implementation, no standard reactor actually uses this
+> information, so it could be removed when this interface is refactored.
+
+### Broadcast
+
+The switch provides, mostly for historical or backward-compatibility reasons,
+a method for sending a message to all connected peers:
+
+    func (sw *Switch) Broadcast(e Envelope) chan bool
+
+The `Broadcast()` method is not blocking and returns a channel of booleans.
+For every connected `Peer`, it starts a background thread for sending the
+message to that peer, using the `Peer.Send()` method
+(which is blocking, as detailed in [Send Methods](#send-methods)).
+The result of each unicast send operation (success or failure) is added to the
+returned channel, which is closed when all operations are completed.
+
+> **Note**
+>
+> - The current _implementation_ of the `Switch.Broadcast(Envelope)` method is
+>   not efficient, as the marshalling of the provided message is performed as
+>   part of the `Peer.Send(Envelope)` helper method, that is, once per
+>   connected peer.
+> - The return value of the broadcast method is not considered by any of the
+>   standard reactors that employ the method. One of the reasons is that it is
+>   not possible to associate each of the boolean outputs added to the
+>   returned channel with a peer.
+
+### Vetting Peers
+
+The p2p layer relies on the registered reactors to gauge the _quality_ of peers.
+The following method can be invoked by a reactor to inform the p2p layer that a
+peer has presented a "good" behaviour.
+This information is registered in the node's address book and influences the
+operation of the Peer Exchange (PEX) protocol, as node discovery adopts a bias
+towards "good" peers:
+
+    func (sw *Switch) MarkPeerAsGood(peer Peer)
+
+At the moment, it is up to the consensus reactor to vet a peer.
+In the current logic, a peer is marked as good whenever the consensus protocol
+collects a multiple of `votesToContributeToBecomeGoodPeer = 10000` useful votes
+or `blocksToContributeToBecomeGoodPeer = 10000` useful block parts from that peer.
+By "useful", the consensus implementation considers messages that are valid and
+that are received when the node is expecting such information,
+which excludes duplicated or late-received messages.
+
+> **Note**
+>
+> The switch doesn't currently provide a method to mark a peer as a bad peer.
+> In fact, peer quality management is not really implemented in the current
+> version of the p2p layer.
+> This topic is being discussed in the [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-quality.md).
+
+### Stopping Peers
+
+Reactors can instruct the p2p layer to disconnect from a peer.
+Using the p2p layer's nomenclature, the reactor requests a peer to be stopped.
+The peer's send and receive routines are in fact stopped, interrupting the
+communication with the peer.
+The `Peer` is then [removed from every registered reactor][reactor-removepeer],
+using the `RemovePeer(Peer)` method, and from the set of connected peers.
+
+    func (sw *Switch) StopPeerForError(peer Peer, reason interface{})
+
+All the standard reactors employ the above method for disconnecting from a peer
+in case of errors.
+These are errors that occur when processing a message received from a `Peer`.
+The produced `error` is provided to the method as the `reason`.
+
+The `StopPeerForError()` method has an important *caveat*: if the peer to be
+stopped is configured as a _persistent peer_, the switch will attempt to
+reconnect to that same peer.
+While this behaviour makes sense when the method is invoked by other components
+of the p2p layer (e.g., in the case of communication errors), it does not make
+sense when it is invoked by a reactor.
+
+> **Note**
+>
+> A more comprehensive discussion regarding this topic can be found in the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/stop-peer.md).
+
+    func (sw *Switch) StopPeerGracefully(peer Peer)
+
+The second method instructs the switch to disconnect from a peer for no
+particular reason.
+This method is only adopted by the PEX reactor of a node operating in _seed mode_,
+as seed nodes disconnect from a peer after exchanging peer addresses with it.
+
+### Reactors Table
+
+The switch keeps track of all registered reactors, indexed by unique reactor names.
+A reactor can therefore use the switch to access another `Reactor` by its `name`:
+
+    func (sw *Switch) Reactor(name string) Reactor
+
+This method is currently only used by the Block Sync reactor to access the
+Consensus reactor implementation, of which it invokes the exported
+`SwitchToConsensus()` method.
+While available, this inter-reactor interaction approach is discouraged and
+should be avoided, as it violates the assumption that reactors are independent.
+
+
+## `Peer` API
+
+The [`Peer`][peer-interface] interface represents a connected peer.
+A `Peer` instance encapsulates a multiplex connection that implements the
+actual communication (sending and receiving messages) with a peer.
+When a connection is established with a peer, the `Switch` provides the
+corresponding `Peer` instance to all registered reactors.
+From this point, reactors can use the methods of the new `Peer` instance.
+
+The table below summarizes the interaction of the standard reactors with
+connected peers, with the `Peer` methods used by them:
+
+| `Peer` API method                          | consensus | block sync | state sync | mempool | evidence  | PEX   |
+|--------------------------------------------|-----------|------------|------------|---------|-----------|-------|
+| `ID() ID`                                  | x         | x          | x          | x       | x         | x     |
+| `IsRunning() bool`                         | x         |            |            | x       | x         |       |
+| `Quit() <-chan struct{}`                   |           |            |            | x       | x         |       |
+| `Get(string) interface{}`                  | x         |            |            | x       | x         |       |
+| `Set(string, interface{})`                 | x         |            |            |         |           |       |
+| `Send(Envelope) bool`                      | x         | x          | x          | x       | x         | x     |
+| `TrySend(Envelope) bool`                   | x         | x          |            |         |           |       |
+
+The above list is not exhaustive as it does not include all the `Peer` methods
+invoked by the PEX reactor, a special component that should be considered part
+of the p2p layer. This document does not cover the operation of the PEX reactor
+as a connection manager.
+
+### Identification
+
+Nodes in the p2p network are configured with a unique cryptographic key pair.
+The public part of this key pair is verified when establishing a connection
+with the peer, as part of the authentication handshake, and constitutes the
+peer's `ID`:
+
+    func (p Peer) ID() p2p.ID
+
+Observe that each time the node connects to a peer (e.g., after disconnecting
+from it), a new (distinct) `Peer` handler is provided to the reactors via the
+`InitPeer(Peer)` method.
+In fact, the `Peer` handler is associated with a _connection_ to a peer, not
+with the actual _node_ in the network.
+To keep track of actual peers, the unique peer `p2p.ID` provided by the above
+method should be employed.
+
+### Peer state
+
+The switch starts the peer's send and receive routines before adding the peer
+to every registered reactor using the `AddPeer(Peer)` method.
+The reactors then usually start routines to interact with the newly connected
+peer using the received `Peer` handler.
+For these routines it is useful to check whether the peer is still connected
+and its send and receive routines are still running:
+
+    func (p Peer) IsRunning() bool
+    func (p Peer) Quit() <-chan struct{}
+
+The above two methods provide the same information about the state of a `Peer`
+instance in two different ways.
+Both of them are defined in the [`Service`][service-interface] interface.
+The `IsRunning()` method is synchronous and returns whether the peer has been
+started and has not been stopped.
+The `Quit()` method returns a channel that is closed when the peer is stopped;
+it is an asynchronous state query.
+
+### Key-value store
+
+Each `Peer` instance provides a synchronized key-value store that allows
+sharing peer-specific state between reactors:
+
+    func (p Peer) Get(key string) interface{}
+    func (p Peer) Set(key string, data interface{})
+
+This key-value store can be seen as an asynchronous mechanism to exchange the
+state of a peer between reactors.
+In the current use-case of this mechanism, the Consensus reactor populates the
+key-value store with a `PeerState` instance for each connected peer.
+The Consensus reactor routines interacting with a peer read and update the
+shared peer state.
+The Evidence and Mempool reactors, in turn, periodically query the
+key-value store of each peer to retrieve, in particular, the last height
+reported by the peer.
+This information, produced by the Consensus reactor, influences the interaction
+of these two reactors with their peers.
+
+> **Note**
+>
+> More details of how this key-value store is used to share state between reactors can be found in the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-kvstore.md).
+
+### Send methods
+
+Finally, a `Peer` instance allows a reactor to send messages to companion
+reactors running at that peer.
+This is ultimately the goal of the switch when it provides `Peer` instances to
+the registered reactors.
+There are two methods for sending messages:
+
+    func (p Peer) Send(e Envelope) bool
+    func (p Peer) TrySend(e Envelope) bool
+
+The two message-sending methods receive an `Envelope`, whose content should be
+set as follows (a usage sketch is provided after the list):
+
+- `ChannelID`: the channel the message should be sent through, which defines
+  the reactor that will process the message;
+- `Src`: this field represents the source of an incoming message, which is
+  irrelevant for outgoing messages;
+- `Message`: the actual message's payload, which is marshalled using protocol buffers.
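+
+As an illustration, the following is a minimal sketch of how a reactor routine
+might use these methods, assuming the `p2p` package types of the `v0.38.x`
+tree; the channel ID, package, and function names are hypothetical:
+
+```go
+package myreactor
+
+import (
+	"github.com/cosmos/gogoproto/proto"
+
+	"github.com/cometbft/cometbft/p2p"
+)
+
+// myChannelID is a hypothetical channel ID, assumed to be registered by the
+// reactor via its ChannelDescriptor.
+const myChannelID = byte(0x42)
+
+// sendToPeer sends msg to the companion reactor at the given peer, first
+// without blocking and then, if the send queue is full, blocking for up to
+// the 10-second timeout. Note that Src is left unset: it is only meaningful
+// for incoming messages.
+func sendToPeer(peer p2p.Peer, msg proto.Message) bool {
+	e := p2p.Envelope{
+		ChannelID: myChannelID,
+		Message:   msg, // marshalled by the p2p layer
+	}
+	if peer.TrySend(e) { // non-blocking attempt
+		return true
+	}
+	return peer.Send(e) // blocking attempt
+}
+```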
+
+The two message-sending methods attempt to add the message (`e.Message`) to the
+send queue of the peer's destination channel (`e.ChannelID`).
+There is a send queue for each registered channel supported by the peer, and
+each send queue has a capacity.
+The capacity of the send queue for each channel is [configured][reactor-channels]
+by reactors via the corresponding `ChannelDescriptor`.
+
+The two message-sending methods return whether it was possible to enqueue
+the marshalled message to the channel's send queue.
+The most common reason for these methods to return `false` is the channel's
+send queue being full.
+Further reasons for returning `false` are: the peer being stopped, providing a
+non-registered channel ID, or errors when marshalling the message's payload.
+
+The difference between the two message-sending methods is _when_ they return `false`.
+The `Send()` method is a _blocking_ method: it returns `false` if the message
+could not be enqueued because the channel's send queue was still full after a
+10-second _timeout_.
+The `TrySend()` method is a _non-blocking_ method: it _immediately_ returns
+`false` when the channel's send queue is full.
+
+[peer-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/peer.go
+[service-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/libs/service/service.go
+[switch-type]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/switch.go
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
+[reactor-registration]: ./reactor.md#registration
+[reactor-channels]: ./reactor.md#registration
+[reactor-addpeer]: ./reactor.md#peer-management
+[reactor-removepeer]: ./reactor.md#stop-peer
diff --git a/spec/p2p/reactor-api/reactor.md b/spec/p2p/reactor-api/reactor.md
new file mode 100644
index 00000000000..a7862faeab0
--- /dev/null
+++ b/spec/p2p/reactor-api/reactor.md
@@ -0,0 +1,234 @@
+---
+order: 2
+---
+
+# Reactor API
+
+A component has to implement the [`p2p.Reactor` interface][reactor-interface]
+in order to use communication services provided by the p2p layer.
+This interface is currently the main source of documentation for a reactor.
+
+The goal of this document is to specify the behaviour of the p2p communication
+layer when interacting with a reactor.
+So while the [`Reactor` interface][reactor-interface] declares the methods
+invoked and determines what the p2p layer expects from a reactor,
+this documentation focuses on the **temporal behaviour** that a reactor implementation
+should expect from the p2p layer (that is, the order in which the methods may be called).
+
+This specification is accompanied by the [`reactor.qnt`](./reactor.qnt) file,
+a more comprehensive model of the reactor's operation written in
+[Quint][quint-repo], an executable specification language.
+The methods declared in the [`Reactor`][reactor-interface] interface are
+modeled in Quint, in the form of `pure def` methods, providing some examples of
+how they should be implemented.
+The behaviour of the p2p layer when interacting with a reactor, by invoking the
+interface methods, is modeled in the form of state transitions, or `action`s in
+the Quint nomenclature.
+
+## Overview
+
+The following _grammar_ is a simplified representation of the expected sequence of calls
+from the p2p layer to a reactor.
+Note that the grammar represents events referring to a _single reactor_, while
+the p2p layer supports the execution of multiple reactors.
+For a more detailed representation of the sequence of calls from the p2p layer
+to reactors, please refer to the companion Quint model.
+
+While useful to provide an overview of the operation of a reactor,
+grammars have some limitations in terms of the behaviour they can express.
+For instance, the following grammar only represents the management of _a single peer_,
+namely of a peer with a given ID which can connect, disconnect, and reconnect
+multiple times to the node.
+The p2p layer and every reactor should be able to handle multiple distinct peers in parallel.
+This means that multiple occurrences of the non-terminal `peer-management` of the
+grammar below can "run" independently and in parallel, each one referring to,
+and producing events associated with, a different peer:
+
+```abnf
+start = registration on-start *peer-management on-stop
+registration = get-channels set-switch
+
+; Refers to a single peer, a reactor must support multiple concurrent peers
+peer-management = init-peer start-peer stop-peer
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+stop-peer = [peer-error] remove-peer
+
+; Service interface
+on-start = %s"OnStart()"
+on-stop = %s"OnStop()"
+; Reactor interface
+get-channels = %s"GetChannels()"
+set-switch = %s"SetSwitch(*Switch)"
+init-peer = %s"InitPeer(Peer)"
+add-peer = %s"AddPeer(Peer)"
+remove-peer = %s"RemovePeer(Peer, reason)"
+receive = %s"Receive(Envelope)"
+
+; Errors, for reference
+start-error = %s"log(Error starting peer)"
+peer-error = %s"log(Stopping peer for error)"
+```
+
+The grammar is written in case-sensitive Augmented Backus–Naur form (ABNF,
+specified in [IETF RFC 7405](https://datatracker.ietf.org/doc/html/rfc7405)).
+It is inspired by the grammar produced to specify the interaction of CometBFT
+with an ABCI++ application, available [here](../../abci/abci%2B%2B_comet_expected_behavior.md).
+
+## Registration
+
+To become a reactor, a component has first to implement the
+[`Reactor`][reactor-interface] interface,
+then to register the implementation with the p2p layer, using the
+`Switch.AddReactor(name string, reactor Reactor)` method,
+with a globally unique `name` for the reactor.
+
+The registration must happen before the node, in general, and the p2p layer,
+in particular, are started.
+In other words, there is no support for registering a reactor on a running node:
+reactors must be registered as part of the setup of a node.
+
+```abnf
+registration = get-channels set-switch
+```
+
+The p2p layer retrieves from the reactor a list of channels the reactor is
+responsible for, using the `GetChannels()` method.
+The reactor implementation should thereafter expect the delivery of every
+message received by the p2p layer on the registered channels.
+
+The second method, `SetSwitch(Switch)`, concludes the handshake between the
+reactor and the p2p layer.
+The `Switch` is the main component of the p2p layer, being responsible for
+establishing connections with peers and routing messages.
+The `Switch` instance provides a number of methods for all registered reactors,
+documented in the companion [API for Reactors](./p2p-api.md#switch-api) document.
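+
+As an illustration, below is a minimal sketch of a registrable reactor,
+assuming the `p2p` package of the `v0.38.x` tree; the reactor name, channel
+ID, and package are hypothetical, and most `ChannelDescriptor` fields (e.g.,
+the expected message type) are omitted:
+
+```go
+package myreactor
+
+import (
+	"github.com/cometbft/cometbft/p2p"
+	"github.com/cometbft/cometbft/p2p/conn"
+)
+
+// MyReactor embeds p2p.BaseReactor, which provides default implementations
+// of the Reactor interface methods, including SetSwitch(Switch).
+type MyReactor struct {
+	p2p.BaseReactor
+}
+
+func NewMyReactor() *MyReactor {
+	r := &MyReactor{}
+	r.BaseReactor = *p2p.NewBaseReactor("MyReactor", r)
+	return r
+}
+
+// GetChannels declares the channels the reactor is responsible for.
+// The channel ID (0x42) is arbitrary, but must be globally unique.
+func (r *MyReactor) GetChannels() []*conn.ChannelDescriptor {
+	return []*conn.ChannelDescriptor{{ID: 0x42, Priority: 5}}
+}
+
+// Receive handles the messages delivered on the registered channels.
+func (r *MyReactor) Receive(e p2p.Envelope) {
+	// Process e.Message, received from e.Src on channel e.ChannelID.
+}
+
+// The registration itself happens during the setup of the node, e.g.:
+//
+//	sw.AddReactor("MyReactor", NewMyReactor())
+```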
+
+## Service interface
+
+A reactor must implement the [`Service`](https://github.com/cometbft/cometbft/blob/v0.38.x/libs/service/service.go) interface,
+in particular, the startup `OnStart()` and shutdown `OnStop()` methods:
+
+```abnf
+start = registration on-start *peer-management on-stop
+```
+
+As part of the startup of a node, all registered reactors are started by the p2p layer.
+And when the node is shut down, all registered reactors are stopped by the p2p layer.
+Observe that the `Service` interface specification establishes that a service
+can be started and stopped only once.
+So before being started, or once stopped by the p2p layer, the reactor should
+not expect any interaction.
+
+## Peer management
+
+The core of a reactor's operation is the interaction with peers or, more
+precisely, with companion reactors operating on the same channels in peers connected to the node.
+The grammar extract below represents the interaction of the reactor with a
+single peer:
+
+```abnf
+; Refers to a single peer, a reactor must support multiple concurrent peers
+peer-management = init-peer start-peer stop-peer
+```
+
+The p2p layer informs all registered reactors when it establishes a connection
+with a `Peer`, using the `InitPeer(Peer)` method.
+When this method is invoked, the `Peer` has not yet been started, namely the
+routines for sending messages to and receiving messages from the peer are not running.
+This method should be used to initialize state or data related to the new
+peer, but not to interact with it.
+
+The next step is to start the communication routines with the new `Peer`.
+As detailed in the following, this procedure may or may not succeed.
+In any case, the peer is eventually stopped, which concludes the management of
+that `Peer` instance.
+
+## Start peer
+
+Once `InitPeer(Peer)` is invoked for every registered reactor, the p2p layer starts the peer's
+communication routines and adds the `Peer` to the set of connected peers.
+If both steps are concluded without errors, the reactor's `AddPeer(Peer)` is invoked:
+
+```abnf
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+```
+
+In case of errors, a message is logged informing that the p2p layer failed to start the peer.
+This is not a common scenario and it is only expected to happen when
+interacting with a misbehaving or slow peer. A practical example is reported in this
+[issue](https://github.com/tendermint/tendermint/pull/9500).
+
+It is up to the reactor to define how to process the `AddPeer(Peer)` event.
+The typical behavior is to start routines that, given some conditions or events,
+send messages to the added peer, using the provided `Peer` instance.
+The companion [API for Reactors](./p2p-api.md#peer-api) documents the methods
+provided by `Peer` instances, available from when they are added to the reactors.
+
+## Stop Peer
+
+The p2p layer informs all registered reactors when it disconnects from a `Peer`,
+using the `RemovePeer(Peer, reason)` method:
+
+```abnf
+stop-peer = [peer-error] remove-peer
+```
+
+This method is invoked after the p2p layer has stopped the peer's send and receive routines.
+Depending on the `reason` for which the peer was stopped, different log
+messages can be produced.
+After removing a peer from all reactors, the `Peer` instance is also removed from
+the set of connected peers.
+This enables the same peer to reconnect and `InitPeer(Peer)` to be invoked for
+the new connection.
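+
+As an illustration, a reactor could manage the routines it starts upon
+`AddPeer(Peer)` and stops upon `RemovePeer(Peer, reason)` as in the following
+minimal sketch; the types and names are hypothetical, and only `p2p.Peer` and
+`p2p.ID` are assumed from the `v0.38.x` tree:
+
+```go
+package myreactor
+
+import (
+	"sync"
+	"time"
+
+	"github.com/cometbft/cometbft/p2p"
+)
+
+// peerRoutines tracks one quit channel per connected peer.
+type peerRoutines struct {
+	mtx   sync.Mutex
+	quits map[p2p.ID]chan struct{}
+}
+
+// onAddPeer starts a routine that periodically interacts with the peer;
+// it would typically be called from the reactor's AddPeer method.
+func (pr *peerRoutines) onAddPeer(peer p2p.Peer) {
+	pr.mtx.Lock()
+	defer pr.mtx.Unlock()
+	quit := make(chan struct{})
+	pr.quits[peer.ID()] = quit
+	go func() {
+		ticker := time.NewTicker(time.Second)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-quit: // RemovePeer was invoked for this peer
+				return
+			case <-peer.Quit(): // the peer was stopped by the p2p layer
+				return
+			case <-ticker.C:
+				// Interact with the peer, e.g., via peer.Send(...).
+			}
+		}
+	}()
+}
+
+// onRemovePeer stops the routine started by onAddPeer; it would typically
+// be called from the reactor's RemovePeer method.
+func (pr *peerRoutines) onRemovePeer(peer p2p.Peer) {
+	pr.mtx.Lock()
+	defer pr.mtx.Unlock()
+	if quit, ok := pr.quits[peer.ID()]; ok {
+		close(quit)
+		delete(pr.quits, peer.ID())
+	}
+}
+```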
+
+From the removal of a `Peer` on, the reactor should not receive any further messages
+from that peer and must not try to send messages to the removed peer.
+This usually means stopping the routines that were started by the companion
+`AddPeer(Peer)` method.
+
+## Receive messages
+
+The main duty of a reactor is to handle incoming messages on the channels it
+has registered with the p2p layer.
+
+The _pre-condition_ for receiving a message from a `Peer` is that the p2p layer
+has previously invoked `InitPeer(Peer)`.
+This means that the reactor must be able to receive a message from a `Peer`
+_before_ `AddPeer(Peer)` is invoked.
+This happens because the peer's send and receive routines are started earlier,
+and should already be running when the p2p layer adds the peer to every
+registered reactor.
+
+```abnf
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+```
+
+The most common scenario, however, is to start receiving messages from a peer
+after `AddPeer(Peer)` is invoked.
+An arbitrary number of messages can be received, until the peer is stopped and
+`RemovePeer(Peer)` is invoked.
+
+When a message is received from a connected peer on any of the channels
+registered by the reactor, the p2p layer will deliver the message to the
+reactor via the `Receive(Envelope)` method.
+The message is packed into an `Envelope` that contains:
+
+- `ChannelID`: the channel the message belongs to
+- `Src`: the source `Peer` handler, from which the message was received
+- `Message`: the actual message's payload, unmarshalled using protocol buffers
+
+Two important observations regarding the implementation of the `Receive` method:
+
+1. Concurrency: the implementation should consider concurrent invocations of
+   the `Receive` method carrying messages from different peers, as the
+   interaction with different peers is independent and messages can be received in parallel.
+1. Non-blocking: the implementation of the `Receive` method is expected not to block,
+   as it is invoked directly by the receive routines.
+   In other words, while `Receive` does not return, other messages from the
+   same sender are not delivered to any reactor.
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
+[quint-repo]: https://github.com/informalsystems/quint
diff --git a/spec/p2p/reactor-api/reactor.qnt b/spec/p2p/reactor-api/reactor.qnt
new file mode 100644
index 00000000000..002c57023af
--- /dev/null
+++ b/spec/p2p/reactor-api/reactor.qnt
@@ -0,0 +1,276 @@
+// -*- mode: Bluespec; -*-
+/*
+ * Reactor is responsible for handling incoming messages on one or more
+ * Channel. Switch calls GetChannels when reactor is added to it. When a new
+ * peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
+ * when the peer is stopped. Receive is called when a message is received on a
+ * channel associated with this reactor.
+ */
+// Code: https://github.com/cometbft/cometbft/blob/main/p2p/base_reactor.go
+module reactor {
+
+    // Unique ID of a node.
+    type NodeID = str
+
+    /*
+     * Peer is an interface representing a peer connected on a reactor.
+     */
+    type Peer = {
+        ID: NodeID,
+
+        // Other fields can be added to represent the p2p operation.
+    }
+
+    // Byte ID used by channels, must be globally unique.
+    type Byte = str
+
+    // Channel configuration.
+    type ChannelDescriptor = {
+        ID: Byte,
+        Priority: int,
+    }
+
+    /*
+     * Envelope contains a message with sender routing info.
+ */ + type Envelope = { + Src: Peer, // Sender + Message: str, // Payload + ChannelID: Byte, + } + + // A Routine is used to interact with an active Peer. + type Routine = { + name: str, + peer: Peer, + } + + type ReactorState = { + // Peers that have been initialized but not yet removed. + // The reactor should expect receiving messages from them. + peers: Set[NodeID], + + // The reactor runs multiple routines. + routines: Set[Routine], + + // Values: init -> registered -> running -> stopped + state: str, + + // Name with which the reactor was registered. + name: str, + + // Channels the reactor is responsible for. + channels: Set[ChannelDescriptor], + } + + // Produces a new, uninitialized reactor. + pure def NewReactor(): ReactorState = { + { + peers: Set(), + routines: Set(), + state: "init", + name: "", + channels: Set(), + } + } + + // Pure definitions below represent the `p2p.Reactor` interface methods: + + /* + * GetChannels returns the list of MConnection.ChannelDescriptor. Make sure + * that each ID is unique across all the reactors added to the switch. + */ + pure def GetChannels(s: ReactorState): Set[ChannelDescriptor] = { + s.channels // Static list, configured at initialization. + } + + /* + * SetSwitch allows setting a switch. + */ + pure def SetSwitch(s: ReactorState, switch: bool): ReactorState = { + s.with("state", "registered") + } + + /* + * Start the service. + * If it's already started or stopped, will return an error. + */ + pure def OnStart(s: ReactorState): ReactorState = { + // Startup procedures should come here. + s.with("state", "running") + } + + /* + * Stop the service. + * If it's already stopped, will return an error. + */ + pure def OnStop(s: ReactorState): ReactorState = { + // Shutdown procedures should come here. + s.with("state", "stopped") + } + + /* + * InitPeer is called by the switch before the peer is started. Use it to + * initialize data for the peer (e.g. peer state). + */ + pure def InitPeer(s: ReactorState, peer: Peer): (ReactorState, Peer) = { + // This method can update the received peer, which is returned. + val updatedPeer = peer + (s.with("peers", s.peers.union(Set(peer.ID))), updatedPeer) + } + + /* + * AddPeer is called by the switch after the peer is added and successfully + * started. Use it to start goroutines communicating with the peer. + */ + pure def AddPeer(s: ReactorState, peer: Peer): ReactorState = { + // This method can be used to start routines to handle the peer. + // Below an example of an arbitrary 'ioRoutine' routine. + val startedRoutines = Set( {name: "ioRoutine", peer: peer} ) + s.with("routines", s.routines.union(startedRoutines)) + } + + /* + * RemovePeer is called by the switch when the peer is stopped (due to error + * or other reason). + */ + pure def RemovePeer(s: ReactorState, peer: Peer, reason: str): ReactorState = { + // This method should stop routines created by `AddPeer(Peer)`. + val stoppedRoutines = s.routines.filter(r => r.peer.ID == peer.ID) + s.with("peers", s.peers.exclude(Set(peer.ID))) + .with("routines", s.routines.exclude(stoppedRoutines)) + } + + /* + * Receive is called by the switch when an envelope is received from any connected + * peer on any of the channels registered by the reactor. + */ + pure def Receive(s: ReactorState, e: Envelope): ReactorState = { + // This method should process the message payload: e.Message. + s + } + + // Global state + + // Reactors are uniquely identified by their names. + var reactors: str -> ReactorState + + // Reactor (name) assigned to each channel ID. 
+    var reactorsByCh: Byte -> str
+
+    // Helper action to (only) update the state of a given reactor.
+    action updateReactorTo(reactor: ReactorState): bool = all {
+        reactors' = reactors.set(reactor.name, reactor),
+        reactorsByCh' = reactorsByCh
+    }
+
+    // State transitions performed by the p2p layer, invoking `p2p.Reactor` methods:
+
+    // Code: Switch.AddReactor(name string, reactor Reactor)
+    action register(name: str, reactor: ReactorState): bool = all {
+        reactor.state == "init",
+        // Assign the reactor as responsible for its channel IDs, which
+        // should not be already assigned to another reactor.
+        val chIDs = reactor.GetChannels().map(c => c.ID)
+        all {
+            size(chIDs.intersect(reactorsByCh.keys())) == 0,
+            reactorsByCh' = reactorsByCh.keys().union(chIDs).
+                mapBy(id => if (id.in(chIDs)) name
+                            else reactorsByCh.get(id)),
+        },
+        // Register the reactor by its name, which must be unique.
+        not(name.in(reactors.keys())),
+        reactors' = reactors.put(name,
+            reactor.SetSwitch(true).with("name", name))
+    }
+
+    // Code: Switch.OnStart()
+    action start(reactor: ReactorState): bool = all {
+        reactor.state == "registered",
+        updateReactorTo(reactor.OnStart())
+    }
+
+    // Code: Switch.addPeer(p Peer): preamble
+    action initPeer(reactor: ReactorState, peer: Peer): bool = all {
+        reactor.state == "running",
+        not(peer.ID.in(reactor.peers)),
+        updateReactorTo(reactor.InitPeer(peer)._1)
+    }
+
+    // Code: Switch.addPeer(p Peer): conclusion
+    action addPeer(reactor: ReactorState, peer: Peer): bool = all {
+        reactor.state == "running",
+        peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer)
+        reactor.routines.filter(r => r.peer.ID == peer.ID).size() == 0,
+        updateReactorTo(reactor.AddPeer(peer))
+    }
+
+    // Code: Switch.stopAndRemovePeer(peer Peer, reason interface{})
+    action removePeer(reactor: ReactorState, peer: Peer, reason: str): bool = all {
+        reactor.state == "running",
+        peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer)
+        // Routines might not be started, namely: not AddPeer(peer)
+        // Routines could also be already stopped if Peer has errored.
+ updateReactorTo(reactor.RemovePeer(peer, reason)) + } + + // Code: Peer type, onReceive := func(chID byte, msgBytes []byte) + action receive(reactor: ReactorState, e: Envelope): bool = all { + reactor.state == "running", + // The message's sender is an active peer + e.Src.ID.in(reactor.peers), + // Reactor is assigned to the message's channel ID + e.ChannelID.in(reactorsByCh.keys()), + reactorsByCh.get(e.ChannelID) == reactor.name, + reactor.GetChannels().exists(c => c.ID == e.ChannelID), + updateReactorTo(reactor.Receive(e)) + } + + // Code: Switch.OnStop() + action stop(reactor: ReactorState): bool = all { + reactor.state == "running", + // Either no peer was added or all peers were removed + reactor.peers.size() == 0, + updateReactorTo(reactor.OnStop()) + } + + // Simulation support + + action init = all { + reactors' = Map(), + reactorsByCh' = Map(), + } + + // Modelled reactor configuration + pure val reactorName = "myReactor" + pure val reactorChannels = Set({ID: "3", Priority: 1}, {ID: "7", Priority: 2}) + + // For retro-compatibility: the state of the modelled reactor + def state(): ReactorState = { + reactors.get(reactorName) + } + + pure val samplePeers = Set({ID: "p1"}, {ID: "p3"}) + pure val sampleChIDs = Set("1", "3", "7") // ChannelID 1 not registered + pure val sampleMsgs = Set("ping", "pong") + + action step = any { + register(reactorName, NewReactor.with("channels", reactorChannels)), + val reactor = reactors.get(reactorName) + any { + reactor.start(), + reactor.stop(), + nondet peer = oneOf(samplePeers) + any { + // Peer-specific actions + reactor.initPeer(peer), + reactor.addPeer(peer), + reactor.removePeer(peer, "no reason"), + reactor.receive({Src: peer, + ChannelID: oneOf(sampleChIDs), + Message: oneOf(sampleMsgs)}), + } + } + } + +} diff --git a/spec/p2p/readme.md b/spec/p2p/readme.md deleted file mode 100644 index e7f57c30263..00000000000 --- a/spec/p2p/readme.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -order: 1 -parent: - title: P2P - order: 6 ---- - -# Peer-to-Peer Communication - -The operation of the p2p adopted in production CometBFT networks is [HERE](./v0.34/). - -> This is part of an ongoing [effort](https://github.com/cometbft/cometbft/issues/19) -> to produce a high-level specification of the operation of the p2p layer. diff --git a/spec/p2p/v0.34/README.md b/spec/p2p/v0.34/README.md deleted file mode 100644 index 017e1001f66..00000000000 --- a/spec/p2p/v0.34/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Peer-to-Peer Communication - -This document describes the implementation of the peer-to-peer (p2p) -communication layer in CometBFT. - -It is part of an [effort](https://github.com/cometbft/cometbft/issues/19) -to produce a high-level specification of the operation of the p2p layer adopted -in production CometBFT networks. - -This documentation, therefore, considers the releases `0.34.*` of CometBFT, more -specifically, the branch [`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x) -of this repository. - -## Overview - -A CometBFT network is composed of multiple CometBFT instances, hereafter -called **nodes**, that interact by exchanging messages. - -CometBFT assumes a partially-connected network model. -This means that a node is not assumed to be directly connected to every other -node in the network. -Instead, each node is directly connected to a subset of other nodes in the -network, hereafter called its **peers**. 
- -The peer-to-peer (p2p) communication layer is responsible for establishing -connections between nodes in a CometBFT network, -for managing the communication between a node and its peers, -and for intermediating the exchange of messages between peers in CometBFT protocols. - -## Contents - -The documentation follows the organization of the `p2p` package of CometBFT, -which implements the following abstractions: - -- [Transport](./transport.md): establishes secure and authenticated - connections with peers; -- [Switch](./switch.md): responsible for dialing peers and accepting - connections from peers, for managing established connections, and for - routing messages between the reactors and peers, - that is, between local and remote instances of the CometBFT protocols; -- [PEX Reactor](./pex.md): a reactor is the implementation of a protocol which - exchanges messages through the p2p layer. The PEX reactor manages the [Address Book](./addressbook.md) and implements both the [PEX protocol](./pex-protocol.md) and the [Peer Manager](./peer_manager.md) role. - - [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer addresses, thus implementing a peer discovery service; - - [Address Book](./addressbook.md): stores discovered peer addresses and - quality metrics associated to peers with which the node has interacted; - - [Peer Manager](./peer_manager.md): defines when and to which peers a node - should dial, in order to establish outbound connections; -- Finally, [Types](./types.md) and [Configuration](./configuration.md) provide - a list of existing types and configuration parameters used by the p2p layer implementation. - -## Further References - -Existing documentation referring to the p2p layer: - -- : p2p-related - configuration flags; overview of connections, peer instances, and reactors; - overview of peer discovery and node types; peer identity, secure connections - and peer authentication handshake. -- : message - types and channel IDs of Block Sync, Mempool, Evidence, State Sync, PEX, and - Consensus reactors. -- : the p2p layer - configuration and operation is documented in several pages. - This content is not necessarily up-to-date, some settings and concepts may - refer to the release `v0.35`, that was [discontinued][v35postmorten]. -- : - peer types, peer discovery, peer management overview, address book and peer - ranking. This documentation refers to the release `v0.35`, that was [discontinued][v35postmorten]. 
- -[v35postmorten]: https://interchain-io.medium.com/discontinuing-tendermint-v0-35-a-postmortem-on-the-new-networking-layer-3696c811dabc diff --git a/state/compatibility_test.go b/state/compatibility_test.go new file mode 100644 index 00000000000..79846abfed5 --- /dev/null +++ b/state/compatibility_test.go @@ -0,0 +1,281 @@ +package state_test + +import ( + "fmt" + "testing" + "time" + + dbm "github.com/cometbft/cometbft-db" + cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + sm "github.com/cometbft/cometbft/state" + + abci "github.com/cometbft/cometbft/abci/types" + cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" + "github.com/stretchr/testify/require" +) + +// Compatibility test across different state proto versions + +func calcABCIResponsesKey(height int64) []byte { + return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) +} + +var lastABCIResponseKey = []byte("lastABCIResponseKey") + +var ( + _ sm.Store = (*MultiStore)(nil) + _ LegacyStore = (*MultiStore)(nil) +) + +// MultiStore represents a state store that implements the Store interface +// and contains additional store and database options. +type MultiStore struct { + sm.Store + db dbm.DB + sm.StoreOptions +} + +// NewMultiStore returns a new MultiStore. +// It sets the store, db, and StoreOptions fields of the MultiStore struct. +func NewMultiStore(db dbm.DB, options sm.StoreOptions, store sm.Store) *MultiStore { + return &MultiStore{ + Store: store, + db: db, + StoreOptions: options, + } +} + +// LegacyStore represents a legacy data store. +type LegacyStore interface { + SaveABCIResponses(height int64, abciResponses *cmtstate.LegacyABCIResponses) error +} + +// SaveABCIResponses saves the ABCIResponses for a given height in the MultiStore. +// It strips out any nil values from the DeliverTxs field, and saves the ABCIResponses to +// disk if the DiscardABCIResponses flag is set to false. It also saves the last ABCI response +// for crash recovery, overwriting the previously saved response. +func (multi MultiStore) SaveABCIResponses(height int64, abciResponses *cmtstate.LegacyABCIResponses) error { + var dtxs []*abci.ExecTxResult + // strip nil values, + for _, tx := range abciResponses.DeliverTxs { + if tx != nil { + dtxs = append(dtxs, tx) + } + } + abciResponses.DeliverTxs = dtxs + + // If the flag is false then we save the ABCIResponse. This can be used for the /block_results + // query or to reindex an event using the command line. + if !multi.StoreOptions.DiscardABCIResponses { + bz, err := abciResponses.Marshal() + if err != nil { + return err + } + if err := multi.db.Set(calcABCIResponsesKey(height), bz); err != nil { + return err + } + } + + // We always save the last ABCI response for crash recovery. + // This overwrites the previous saved ABCI Response. + response := &cmtstate.ABCIResponsesInfo{ + LegacyAbciResponses: abciResponses, + Height: height, + } + bz, err := response.Marshal() + if err != nil { + return err + } + + return multi.db.SetSync(lastABCIResponseKey, bz) +} + +// TestLegacySaveAndLoadFinalizeBlock tests saving and loading of ABCIResponses +// using the multiStore. It verifies that the loaded ABCIResponses match the +// original ones and that missing fields are correctly handled. +// This test is important for the LoadFinalizeBlockResponse method in the state store. 
+func TestLegacySaveAndLoadFinalizeBlock(t *testing.T) { + tearDown, stateDB, _, store := setupTestCaseWithStore(t) + defer tearDown(t) + options := sm.StoreOptions{ + DiscardABCIResponses: false, + } + + height := int64(1) + multiStore := NewMultiStore(stateDB, options, store) + + // try with a complete ABCI Response + legacyABCIResponses := newLegacyABCIResponses() + err := multiStore.SaveABCIResponses(height, &legacyABCIResponses) + require.NoError(t, err) + require.Equal(t, 1, len(legacyABCIResponses.DeliverTxs)) + require.Equal(t, 1, len(legacyABCIResponses.BeginBlock.Events)) + require.Equal(t, 1, len(legacyABCIResponses.EndBlock.Events)) + + responseFinalizeBlock, err := multiStore.LoadFinalizeBlockResponse(height) + require.NoError(t, err) + + // Test for not nil + require.NotNil(t, responseFinalizeBlock.TxResults) + require.NotNil(t, responseFinalizeBlock.Events) + require.NotNil(t, responseFinalizeBlock.ValidatorUpdates) + require.NotNil(t, responseFinalizeBlock.ConsensusParamUpdates) + require.Nil(t, responseFinalizeBlock.AppHash) + + // Test for equality + require.Equal(t, 1, len(responseFinalizeBlock.TxResults)) + require.Equal(t, len(legacyABCIResponses.DeliverTxs), len(responseFinalizeBlock.TxResults)) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Code, responseFinalizeBlock.TxResults[0].Code) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Data, responseFinalizeBlock.TxResults[0].Data) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Log, responseFinalizeBlock.TxResults[0].Log) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].GasWanted, responseFinalizeBlock.TxResults[0].GasWanted) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].GasUsed, responseFinalizeBlock.TxResults[0].GasUsed) + require.Equal(t, len(legacyABCIResponses.DeliverTxs[0].Events), len(responseFinalizeBlock.TxResults[0].Events)) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Events[0].Type, responseFinalizeBlock.TxResults[0].Events[0].Type) + require.Equal(t, len(legacyABCIResponses.DeliverTxs[0].Events[0].Attributes), len(responseFinalizeBlock.TxResults[0].Events[0].Attributes)) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key, responseFinalizeBlock.TxResults[0].Events[0].Attributes[0].Key) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value, responseFinalizeBlock.TxResults[0].Events[0].Attributes[0].Value) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].Codespace, responseFinalizeBlock.TxResults[0].Codespace) + + require.Equal(t, 2, len(responseFinalizeBlock.Events)) + require.Equal(t, len(legacyABCIResponses.BeginBlock.Events)+len(legacyABCIResponses.EndBlock.Events), len(responseFinalizeBlock.Events)) + + require.Equal(t, legacyABCIResponses.BeginBlock.Events[0].Type, responseFinalizeBlock.Events[0].Type) + require.Equal(t, len(legacyABCIResponses.BeginBlock.Events[0].Attributes)+1, len(responseFinalizeBlock.Events[0].Attributes)) // +1 for inject 'mode' attribute + require.Equal(t, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Key, responseFinalizeBlock.Events[0].Attributes[0].Key) + require.Equal(t, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Value, responseFinalizeBlock.Events[0].Attributes[0].Value) + + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Block.MaxBytes, responseFinalizeBlock.ConsensusParamUpdates.Block.MaxBytes) + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Block.MaxGas, responseFinalizeBlock.ConsensusParamUpdates.Block.MaxGas) + 
require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks, responseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks) + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxAgeDuration, responseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxAgeDuration) + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxBytes, responseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxBytes) + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Validator.PubKeyTypes, responseFinalizeBlock.ConsensusParamUpdates.Validator.PubKeyTypes) + require.Equal(t, legacyABCIResponses.EndBlock.ConsensusParamUpdates.Version.App, responseFinalizeBlock.ConsensusParamUpdates.Version.App) + + require.Nil(t, responseFinalizeBlock.ConsensusParamUpdates.Abci) + require.Nil(t, responseFinalizeBlock.AppHash) + + require.Equal(t, len(legacyABCIResponses.EndBlock.ValidatorUpdates), len(responseFinalizeBlock.ValidatorUpdates)) + require.Equal(t, legacyABCIResponses.EndBlock.ValidatorUpdates[0].Power, responseFinalizeBlock.ValidatorUpdates[0].Power) + + // skip until an equivalency test is possible + require.Equal(t, legacyABCIResponses.EndBlock.ValidatorUpdates[0].PubKey.GetEd25519(), responseFinalizeBlock.ValidatorUpdates[0].PubKey.GetEd25519()) + + // try with an ABCI Response missing fields + height = int64(2) + legacyABCIResponses = newLegacyABCIResponsesWithNullFields() + require.Equal(t, 1, len(legacyABCIResponses.DeliverTxs)) + require.Equal(t, 1, len(legacyABCIResponses.BeginBlock.Events)) + require.Nil(t, legacyABCIResponses.EndBlock) + err = multiStore.SaveABCIResponses(height, &legacyABCIResponses) + require.NoError(t, err) + responseFinalizeBlock, err = multiStore.LoadFinalizeBlockResponse(height) + require.NoError(t, err) + + require.Equal(t, len(legacyABCIResponses.DeliverTxs), len(responseFinalizeBlock.TxResults)) + require.Equal(t, legacyABCIResponses.DeliverTxs[0].String(), responseFinalizeBlock.TxResults[0].String()) + require.Equal(t, len(legacyABCIResponses.BeginBlock.Events), len(responseFinalizeBlock.Events)) +} + +// Generate a Legacy ABCIResponses with data for all fields. 
+func newLegacyABCIResponses() cmtstate.LegacyABCIResponses { + eventAttr := abci.EventAttribute{ + Key: "key", + Value: "value", + } + + deliverTxEvent := abci.Event{ + Type: "deliver_tx_event", + Attributes: []abci.EventAttribute{eventAttr}, + } + + endBlockEvent := abci.Event{ + Type: "end_block_event", + Attributes: []abci.EventAttribute{eventAttr}, + } + + beginBlockEvent := abci.Event{ + Type: "begin_block_event", + Attributes: []abci.EventAttribute{eventAttr}, + } + + responseDeliverTx := abci.ExecTxResult{ + Code: abci.CodeTypeOK, + Events: []abci.Event{deliverTxEvent}, + } + + validatorUpdates := []abci.ValidatorUpdate{{ + PubKey: cmtcrypto.PublicKey{Sum: &cmtcrypto.PublicKey_Ed25519{Ed25519: make([]byte, 1)}}, + Power: int64(10), + }} + + consensusParams := &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxBytes: int64(100000), + MaxGas: int64(10000), + }, + Evidence: &cmtproto.EvidenceParams{ + MaxAgeNumBlocks: int64(10), + MaxAgeDuration: time.Duration(1000), + MaxBytes: int64(10000), + }, + Validator: &cmtproto.ValidatorParams{ + PubKeyTypes: []string{"ed25519"}, + }, + Version: &cmtproto.VersionParams{ + App: uint64(10), + }, + } + + // Legacy ABCI Responses + legacyABCIResponses := cmtstate.LegacyABCIResponses{ + DeliverTxs: []*abci.ExecTxResult{ + &responseDeliverTx, + }, + EndBlock: &cmtstate.ResponseEndBlock{ + Events: []abci.Event{endBlockEvent}, + ConsensusParamUpdates: consensusParams, + ValidatorUpdates: validatorUpdates, + }, + BeginBlock: &cmtstate.ResponseBeginBlock{ + Events: []abci.Event{beginBlockEvent}, + }, + } + return legacyABCIResponses +} + +// Generate a Legacy ABCIResponses with null data for some fields. +func newLegacyABCIResponsesWithNullFields() cmtstate.LegacyABCIResponses { + eventAttr := abci.EventAttribute{ + Key: "key", + Value: "value", + } + + deliverTxEvent := abci.Event{ + Type: "deliver_tx_event", + Attributes: []abci.EventAttribute{eventAttr}, + } + + beginBlockEvent := abci.Event{ + Type: "begin_block_event", + Attributes: []abci.EventAttribute{eventAttr}, + } + + responseDeliverTx := abci.ExecTxResult{ + Code: abci.CodeTypeOK, + Events: []abci.Event{deliverTxEvent}, + } + + // Legacy ABCI Responses + legacyABCIResponses := cmtstate.LegacyABCIResponses{ + DeliverTxs: []*abci.ExecTxResult{ + &responseDeliverTx, + }, + BeginBlock: &cmtstate.ResponseBeginBlock{ + Events: []abci.Event{beginBlockEvent}, + }, + } + return legacyABCIResponses +} diff --git a/state/errors.go b/state/errors.go index 91a6565c588..29c9d3c2fb7 100644 --- a/state/errors.go +++ b/state/errors.go @@ -51,6 +51,15 @@ type ( ErrNoABCIResponsesForHeight struct { Height int64 } + + ErrABCIResponseResponseUnmarshalForHeight struct { + Height int64 + } + + ErrABCIResponseCorruptedOrSpecChangeForHeight struct { + Err error + Height int64 + } ) func (e ErrUnknownBlock) Error() string { @@ -103,4 +112,16 @@ func (e ErrNoABCIResponsesForHeight) Error() string { return fmt.Sprintf("could not find results for height #%d", e.Height) } +func (e ErrABCIResponseResponseUnmarshalForHeight) Error() string { + return fmt.Sprintf("could not decode results for height %d", e.Height) +} + +func (e ErrABCIResponseCorruptedOrSpecChangeForHeight) Error() string { + return fmt.Sprintf("failed to unmarshall FinalizeBlockResponse (also tried as legacy ABCI response) for height %d", e.Height) +} + +func (e ErrABCIResponseCorruptedOrSpecChangeForHeight) Unwrap() error { + return e.Err +} + var ErrFinalizeBlockResponsesNotPersisted = errors.New("node is not persisting finalize block 
responses") diff --git a/state/execution.go b/state/execution.go index 5c4aecdc7b7..4accf1639d6 100644 --- a/state/execution.go +++ b/state/execution.go @@ -94,7 +94,7 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // CreateProposalBlock calls state.MakeBlock with evidence from the evpool // and txs from the mempool. The max bytes must be big enough to fit the commit. -// Up to 1/10th of the block space is allocated for maximum sized evidence. +// The block space is first allocated to outstanding evidence. // The rest is given to txs, up to the max gas. // // Contract: application will not return more bytes than are sent over the wire. @@ -107,14 +107,23 @@ func (blockExec *BlockExecutor) CreateProposalBlock( ) (*types.Block, error) { maxBytes := state.ConsensusParams.Block.MaxBytes + emptyMaxBytes := maxBytes == -1 + if emptyMaxBytes { + maxBytes = int64(types.MaxBlockSizeBytes) + } + maxGas := state.ConsensusParams.Block.MaxGas evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) // Fetch a limited amount of valid txs maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) + maxReapBytes := maxDataBytes + if emptyMaxBytes { + maxReapBytes = -1 + } - txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + txs := blockExec.mempool.ReapMaxBytesMaxGas(maxReapBytes, maxGas) commit := lastExtCommit.ToCommit() block := state.MakeBlock(height, txs, commit, evidence, proposerAddr) rpp, err := blockExec.proxyApp.PrepareProposal( @@ -122,7 +131,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( &abci.RequestPrepareProposal{ MaxTxBytes: maxDataBytes, Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI), + LocalLastCommit: buildExtendedCommitInfoFromStore(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI), Misbehavior: block.Evidence.Evidence.ToABCI(), Height: block.Height, Time: block.Time, @@ -159,7 +168,7 @@ func (blockExec *BlockExecutor) ProcessProposal( Height: block.Header.Height, Time: block.Header.Time, Txs: block.Data.Txs.ToSliceOfBytes(), - ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + ProposedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), ProposerAddress: block.ProposerAddress, NextValidatorsHash: block.NextValidatorsHash, @@ -186,6 +195,13 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e return blockExec.evpool.CheckEvidence(block.Evidence.Evidence) } +// ApplyVerifiedBlock does the same as `ApplyBlock`, but skips verification. +func (blockExec *BlockExecutor) ApplyVerifiedBlock( + state State, blockID types.BlockID, block *types.Block, +) (State, error) { + return blockExec.applyBlock(state, blockID, block) +} + // ApplyBlock validates the block against the state, executes it against the app, // fires the relevant events, commits the app, and saves the new state and responses. // It returns the new state. 
@@ -200,15 +216,18 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrInvalidBlock(err) } - commitInfo := buildLastCommitInfo(block, blockExec.store, state.InitialHeight) + return blockExec.applyBlock(state, blockID, block) +} +func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) { startTime := time.Now().UnixNano() abciResponse, err := blockExec.proxyApp.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ Hash: block.Hash(), NextValidatorsHash: block.NextValidatorsHash, ProposerAddress: block.ProposerAddress, Height: block.Height, - DecidedLastCommit: commitInfo, + Time: block.Time, + DecidedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), Txs: block.Txs.ToSliceOfBytes(), }) @@ -232,7 +251,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, fmt.Errorf("expected tx results length to match size of transactions in block. Expected %d, got %d", len(block.Data.Txs), len(abciResponse.TxResults)) } - blockExec.logger.Info("executed block", "height", block.Height, "app_hash", abciResponse.AppHash) + blockExec.logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", abciResponse.AppHash)) fail.Fail() // XXX @@ -254,7 +273,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, err } if len(validatorUpdates) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Info("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) blockExec.metrics.ValidatorSetUpdates.Add(1) } if abciResponse.ConsensusParamUpdates != nil { @@ -303,10 +322,28 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, nil } -func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote) ([]byte, error) { +func (blockExec *BlockExecutor) ExtendVote( + ctx context.Context, + vote *types.Vote, + block *types.Block, + state State, +) ([]byte, error) { + if !block.HashesTo(vote.BlockID.Hash) { + panic(fmt.Sprintf("vote's hash does not match the block it is referring to %X!=%X", block.Hash(), vote.BlockID.Hash)) + } + if vote.Height != block.Height { + panic(fmt.Sprintf("vote's and block's heights do not match %d!=%d", block.Height, vote.Height)) + } + req := abci.RequestExtendVote{ - Hash: vote.BlockID.Hash, - Height: vote.Height, + Hash: vote.BlockID.Hash, + Height: vote.Height, + Time: block.Time, + Txs: block.Txs.ToSliceOfBytes(), + ProposedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), + Misbehavior: block.Evidence.Evidence.ToABCI(), + NextValidatorsHash: block.NextValidatorsHash, + ProposerAddress: block.ProposerAddress, } resp, err := blockExec.proxyApp.ExtendVote(ctx, &req) @@ -392,8 +429,8 @@ func (blockExec *BlockExecutor) Commit( //--------------------------------------------------------- // Helper functions for executing blocks and updating state -func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { - if block.Height == initialHeight { +func buildLastCommitInfoFromStore(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { + if block.Height == initialHeight { // check for initial height before loading validators // there is no last commit for the initial height. // return an empty value. 
return abci.CommitInfo{} @@ -404,6 +441,19 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a panic(fmt.Errorf("failed to load validator set at height %d: %w", block.Height-1, err)) } + return BuildLastCommitInfo(block, lastValSet, initialHeight) +} + +// BuildLastCommitInfo builds a CommitInfo from the given block and validator set. +// If you want to load the validator set from the store instead of providing it, +// use buildLastCommitInfoFromStore. +func BuildLastCommitInfo(block *types.Block, lastValSet *types.ValidatorSet, initialHeight int64) abci.CommitInfo { + if block.Height == initialHeight { + // there is no last commit for the initial height. + // return an empty value. + return abci.CommitInfo{} + } + var ( commitSize = block.LastCommit.Size() valSetLen = len(lastValSet.Validators) @@ -433,7 +483,7 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a } } -// buildExtendedCommitInfo populates an ABCI extended commit from the +// buildExtendedCommitInfoFromStore populates an ABCI extended commit from the // corresponding CometBFT extended commit ec, using the stored validator set // from ec. It requires ec to include the original precommit votes along with // the vote extensions from the last commit. @@ -442,7 +492,7 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a // data, it returns an empty record. // // Assumes that the commit signatures are sorted according to validator index. -func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo { +func buildExtendedCommitInfoFromStore(ec *types.ExtendedCommit, store Store, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo { if ec.Height < initialHeight { // There are no extended commits for heights below the initial height. return abci.ExtendedCommitInfo{} @@ -453,6 +503,18 @@ func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeigh panic(fmt.Errorf("failed to load validator set at height %d, initial height %d: %w", ec.Height, initialHeight, err)) } + return BuildExtendedCommitInfo(ec, valSet, initialHeight, ap) +} + +// BuildExtendedCommitInfo builds an ExtendedCommitInfo from the given block and validator set. +// If you want to load the validator set from the store instead of providing it, +// use buildExtendedCommitInfoFromStore. +func BuildExtendedCommitInfo(ec *types.ExtendedCommit, valSet *types.ValidatorSet, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo { + if ec.Height < initialHeight { + // There are no extended commits for heights below the initial height. + return abci.ExtendedCommitInfo{} + } + var ( ecSize = ec.Size() valSetLen = len(valSet.Validators) @@ -673,13 +735,14 @@ func ExecCommitBlock( store Store, initialHeight int64, ) ([]byte, error) { - commitInfo := buildLastCommitInfo(block, store, initialHeight) + commitInfo := buildLastCommitInfoFromStore(block, store, initialHeight) resp, err := appConnConsensus.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ Hash: block.Hash(), NextValidatorsHash: block.NextValidatorsHash, ProposerAddress: block.ProposerAddress, Height: block.Height, + Time: block.Time, DecidedLastCommit: commitInfo, Misbehavior: block.Evidence.Evidence.ToABCI(), Txs: block.Txs.ToSliceOfBytes(), @@ -694,7 +757,7 @@ func ExecCommitBlock( return nil, fmt.Errorf("expected tx results length to match size of transactions in block. 
Expected %d, got %d", len(block.Data.Txs), len(resp.TxResults)) } - logger.Info("executed block", "height", block.Height, "app_hash", resp.AppHash) + logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", resp.AppHash)) // Commit block _, err = appConnConsensus.Commit(context.TODO()) diff --git a/state/execution_test.go b/state/execution_test.go index a48954c56c3..797bd8a5778 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -36,7 +36,7 @@ import ( var ( chainID = "execution_chain" - testPartSize uint32 = 65536 + testPartSize uint32 = types.BlockPartSizeBytes ) func TestApplyBlock(t *testing.T) { @@ -85,6 +85,7 @@ func TestApplyBlock(t *testing.T) { // block. func TestFinalizeBlockDecidedLastCommit(t *testing.T) { app := &testApp{} + baseTime := time.Now() cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() @@ -146,6 +147,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} _, err = blockExec.ApplyBlock(state, blockID, block) require.NoError(t, err) + require.True(t, app.LastTime.After(baseTime)) // -> app receives a list of validators with a bool indicating if they signed for i, v := range app.CommitVotes { @@ -204,10 +206,11 @@ func TestFinalizeBlockValidators(t *testing.T) { desc string lastCommitSigs []types.ExtendedCommitSig expectedAbsentValidators []int + shouldHaveTime bool }{ - {"none absent", []types.ExtendedCommitSig{commitSig0, commitSig1}, []int{}}, - {"one absent", []types.ExtendedCommitSig{commitSig0, absentSig}, []int{1}}, - {"multiple absent", []types.ExtendedCommitSig{absentSig, absentSig}, []int{0, 1}}, + {"none absent", []types.ExtendedCommitSig{commitSig0, commitSig1}, []int{}, true}, + {"one absent", []types.ExtendedCommitSig{commitSig0, absentSig}, []int{1}, true}, + {"multiple absent", []types.ExtendedCommitSig{absentSig, absentSig}, []int{0, 1}, false}, } for _, tc := range testCases { @@ -221,7 +224,12 @@ func TestFinalizeBlockValidators(t *testing.T) { block := makeBlock(state, 2, lastCommit.ToCommit()) _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) - require.Nil(t, err, tc.desc) + require.NoError(t, err, tc.desc) + require.True(t, + !tc.shouldHaveTime || + app.LastTime.Equal(now) || app.LastTime.After(now), + "'last_time' should be at or after 'now'; tc %v, last_time %v, now %v", tc.desc, app.LastTime, now, + ) // -> app receives a list of validators with a bool indicating if they signed ctr := 0 @@ -899,6 +907,63 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { mp.AssertExpectations(t) } +// TestPrepareProposalCountSerializationOverhead tests that the block creation logic returns +// an error if the ResponsePrepareProposal returned from the application is at the limit of +// its size and will go beyond the limit upon serialization. 
+func TestPrepareProposalCountSerializationOverhead(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + state, stateDB, privVals := makeState(1, height) + // limit max block size + var bytesPerTx int64 = 4 + const nValidators = 1 + nonDataSize := 5000 - types.MaxDataBytes(5000, 0, nValidators) + state.ConsensusParams.Block.MaxBytes = bytesPerTx*1024 + nonDataSize + maxDataBytes := types.MaxDataBytes(state.ConsensusParams.Block.MaxBytes, 0, nValidators) + + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := test.MakeNTxs(height, maxDataBytes/bytesPerTx) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs) + + app := &abcimocks.Application{} + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + Txs: txs.ToSliceOfBytes(), + }, nil) + + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + blockStore := store.NewBlockStore(dbm.NewMemDB()) + blockExec := sm.NewBlockExecutor( + stateStore, + log.NewNopLogger(), + proxyApp.Consensus(), + mp, + evpool, + blockStore, + ) + pa, _ := state.Validators.GetByIndex(0) + commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + require.NoError(t, err) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) + require.Nil(t, block) + require.ErrorContains(t, err, "transaction data size exceeds maximum") + + mp.AssertExpectations(t) +} + // TestPrepareProposalErrorOnPrepareProposalError tests when the client returns an error // upon calling PrepareProposal on it. func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { diff --git a/state/export_test.go b/state/export_test.go index 24d76adb0f6..62d51e19769 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -42,5 +42,22 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.V // store.go, exported exclusively and explicitly for testing. 
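A note on the size arithmetic in the test above: the transaction budget comes from the block size limit via types.MaxDataBytes, and a PrepareProposal response that fills the raw budget exactly can still overflow once the block is serialized, because each transaction pays additional framing overhead. A minimal sketch of that bookkeeping, assuming the MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) signature used by these tests; the numbers are illustrative only:

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/types"
)

func main() {
	const nValidators = 1
	var maxBlockBytes int64 = 4 * 1024 // hypothetical block size limit

	// Budget left for raw transaction bytes once the header, last commit
	// and (zero) evidence are accounted for.
	maxDataBytes := types.MaxDataBytes(maxBlockBytes, 0, nValidators)

	// Filling this budget exactly with raw tx bytes can still push the
	// serialized block over maxBlockBytes, which is the overflow the test
	// above expects CreateProposalBlock to reject.
	fmt.Println("tx byte budget:", maxDataBytes)
}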
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}} - return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) + batch := stateStore.db.NewBatch() + err := stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet, batch) + if err != nil { + return err + } + err = batch.WriteSync() + if err != nil { + return err + } + return nil +} + +func Int64ToBytes(val int64) []byte { + return int64ToBytes(val) +} + +func Int64FromBytes(val []byte) int64 { + return int64FromBytes(val) } diff --git a/state/helpers_test.go b/state/helpers_test.go index f0de48bbadc..f094b79ab85 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "testing" "time" dbm "github.com/cometbft/cometbft-db" @@ -169,7 +168,6 @@ func genValSet(size int) *types.ValidatorSet { } func makeHeaderPartsResponsesValPubKeyChange( - t *testing.T, state sm.State, pubkey crypto.PubKey, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { @@ -188,7 +186,6 @@ func makeHeaderPartsResponsesValPubKeyChange( } func makeHeaderPartsResponsesValPowerChange( - t *testing.T, state sm.State, power int64, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { @@ -207,7 +204,6 @@ func makeHeaderPartsResponsesValPowerChange( } func makeHeaderPartsResponsesParams( - t *testing.T, state sm.State, params cmtproto.ConsensusParams, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { @@ -242,6 +238,7 @@ type testApp struct { CommitVotes []abci.VoteInfo Misbehavior []abci.Misbehavior + LastTime time.Time ValidatorUpdates []abci.ValidatorUpdate AppHash []byte } @@ -251,6 +248,7 @@ var _ abci.Application = (*testApp)(nil) func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.CommitVotes = req.DecidedLastCommit.Votes app.Misbehavior = req.Misbehavior + app.LastTime = req.Time txResults := make([]*abci.ExecTxResult, len(req.Txs)) for idx := range req.Txs { txResults[idx] = &abci.ExecTxResult{ diff --git a/state/indexer/block.go b/state/indexer/block.go index b79d66f9a3c..4044d2aaa7b 100644 --- a/state/indexer/block.go +++ b/state/indexer/block.go @@ -3,6 +3,7 @@ package indexer import ( "context" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/types" ) @@ -21,4 +22,6 @@ type BlockIndexer interface { // Search performs a query for block heights that match a given FinalizeBlock // event search criteria. Search(ctx context.Context, q *query.Query) ([]int64, error) + + SetLogger(l log.Logger) } diff --git a/state/indexer/block/indexer.go b/state/indexer/block/indexer.go index b489e022daf..0af185e9f50 100644 --- a/state/indexer/block/indexer.go +++ b/state/indexer/block/indexer.go @@ -16,32 +16,42 @@ import ( "github.com/cometbft/cometbft/state/txindex/null" ) -// EventSinksFromConfig constructs a slice of indexer.EventSink using the provided +// IndexerFromConfig constructs a slice of indexer.EventSink using the provided // configuration. 
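The exported test helper above reflects a pattern introduced throughout this patch: saveValidatorsInfo no longer writes to the database directly but appends to a caller-supplied batch, which the caller must flush and close. A minimal sketch of that write-through-batch pattern against cometbft-db, using only the Batch methods these hunks rely on (NewBatch, Set, WriteSync, Close); keys and values are illustrative:

package main

import (
	dbm "github.com/cometbft/cometbft-db"
)

func main() {
	db := dbm.NewMemDB()

	// Collect several related writes in one batch so they land atomically.
	batch := db.NewBatch()
	defer batch.Close() // release resources even on early return

	if err := batch.Set([]byte("validatorsKey/10"), []byte("valset")); err != nil {
		panic(err)
	}
	if err := batch.Set([]byte("consensusParamsKey/10"), []byte("params")); err != nil {
		panic(err)
	}

	// WriteSync flushes the batch and fsyncs, like db.SetSync did before.
	if err := batch.WriteSync(); err != nil {
		panic(err)
	}
}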
-// -//nolint:lll -func IndexerFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) (txindex.TxIndexer, indexer.BlockIndexer, error) { +func IndexerFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) ( + txIdx txindex.TxIndexer, blockIdx indexer.BlockIndexer, err error, +) { + txidx, blkidx, _, err := IndexerFromConfigWithDisabledIndexers(cfg, dbProvider, chainID) + return txidx, blkidx, err +} + +// IndexerFromConfigWithDisabledIndexers constructs a transaction indexer and a block indexer +// from the provided configuration, and additionally reports whether all indexers are disabled. +// If all indexers are disabled in the configuration, it returns null indexers; otherwise, it +// creates the appropriate indexers based on the configuration. +func IndexerFromConfigWithDisabledIndexers(cfg *config.Config, dbProvider config.DBProvider, chainID string) ( + txIdx txindex.TxIndexer, blockIdx indexer.BlockIndexer, allIndexersDisabled bool, err error, +) { switch cfg.TxIndex.Indexer { case "kv": store, err := dbProvider(&config.DBContext{ID: "tx_index", Config: cfg}) if err != nil { - return nil, nil, err + return nil, nil, false, err } - return kv.NewTxIndex(store), blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))), nil + return kv.NewTxIndex(store), blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))), false, nil case "psql": conn := cfg.TxIndex.PsqlConn if conn == "" { - return nil, nil, errors.New("the psql connection settings cannot be empty") + return nil, nil, false, errors.New("the psql connection settings cannot be empty") } es, err := psql.NewEventSink(cfg.TxIndex.PsqlConn, chainID) if err != nil { - return nil, nil, fmt.Errorf("creating psql indexer: %w", err) + return nil, nil, false, fmt.Errorf("creating psql indexer: %w", err) } - return es.TxIndexer(), es.BlockIndexer(), nil + return es.TxIndexer(), es.BlockIndexer(), false, nil default: - return &null.TxIndex{}, &blockidxnull.BlockerIndexer{}, nil + return &null.TxIndex{}, &blockidxnull.BlockerIndexer{}, true, nil } } diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 6128bd37fc5..3840e053a33 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "math/big" "sort" "strconv" "strings" @@ -14,6 +15,8 @@ import ( dbm "github.com/cometbft/cometbft-db" abci "github.com/cometbft/cometbft/abci/types" + idxutil "github.com/cometbft/cometbft/internal/indexer" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" "github.com/cometbft/cometbft/state/indexer" @@ -31,6 +34,7 @@ type BlockerIndexer struct { // Add unique event identifier to use when querying // Matching will be done both on height AND eventSeq eventSeq int64 + log log.Logger } func New(store dbm.DB) *BlockerIndexer { @@ -39,6 +43,10 @@ func New(store dbm.DB) *BlockerIndexer { } } +func (idx *BlockerIndexer) SetLogger(l log.Logger) { + idx.log = l +} + // Has returns true if the given height has been indexed. An error is returned // upon database query failure. func (idx *BlockerIndexer) Has(height int64) (bool, error) { @@ -71,7 +79,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockEvents) error { } // 2.
index block events - if err := idx.indexEvents(batch, bh.Events, "finalize_block", height); err != nil { + if err := idx.indexEvents(batch, bh.Events, height); err != nil { return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } @@ -213,6 +221,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, // fetch matching heights results = make([]int64, 0, len(filteredHeights)) resultMap := make(map[int64]struct{}) + +FOR_LOOP: for _, hBz := range filteredHeights { h := int64FromBytes(hBz) @@ -229,7 +239,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, select { case <-ctx.Done(): - break + break FOR_LOOP default: } @@ -285,27 +295,54 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(eventValue, 10, 64) - if err != nil { - continue LOOP + if _, ok := qr.AnyBound().(*big.Float); ok { + v := new(big.Int) + v, ok := v.SetString(eventValue, 10) + var vF *big.Float + if !ok { + // The precision here is 125. For numbers bigger than this, the value + // will not be parsed properly + vF, _, err = big.ParseFloat(eventValue, 10, 125, big.ToNearestEven) + if err != nil { + continue LOOP + } } if qr.Key != types.BlockHeightKey { keyHeight, err := parseHeightFromEventKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + if err != nil { + idx.log.Error("failure to parse height from key:", err) + continue LOOP + } + withinHeight, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + idx.log.Error("failure checking for height bounds:", err) + continue LOOP + } + if !withinHeight { continue LOOP } } - if checkBounds(qr, v) { - idx.setTmpHeights(tmpHeights, it) + + var withinBounds bool + var err error + if !ok { + withinBounds, err = idxutil.CheckBounds(qr, vF) + } else { + withinBounds, err = idxutil.CheckBounds(qr, v) + } + if err != nil { + idx.log.Error("failed to parse bounds:", err) + } else { + if withinBounds { + idx.setTmpHeights(tmpHeights, it) + } } } select { case <-ctx.Done(): - break - + break LOOP default: } } @@ -327,6 +364,7 @@ LOOP: // Remove/reduce matches in filteredHashes that were not found in this // match (tmpHashes). +FOR_LOOP: for k, v := range filteredHeights { tmpHeight := tmpHeights[k] @@ -337,8 +375,7 @@ LOOP: select { case <-ctx.Done(): - break - + break FOR_LOOP default: } } @@ -356,21 +393,6 @@ func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.It } -func checkBounds(ranges indexer.QueryRange, v int64) bool { - include := true - lowerBound := ranges.LowerBoundValue() - upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { - include = false - } - - if upperBound != nil && v > upperBound.(int64) { - include = false - } - - return include -} - // match returns all matching heights that meet a given query condition and start // key. An already filtered result (filteredHeights) is provided such that any // non-intersecting matches are removed. 
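The hunk above replaces the old int64-only comparison: event values are parsed as arbitrary-precision integers first, and only fall back to a big.Float with 125 bits of precision when they do not parse as integers (the precision bound comes from the comment in the hunk; beyond it, values are rounded). A standalone sketch of that parse order; the helper name here is hypothetical, not the indexer's API:

package main

import (
	"fmt"
	"math/big"
)

// parseEventNumber mirrors the integer-first, float-fallback order used in
// the Search hunk above.
func parseEventNumber(s string) (*big.Int, *big.Float, error) {
	if v, ok := new(big.Int).SetString(s, 10); ok {
		return v, nil, nil // exact, regardless of magnitude
	}
	// Not an integer: parse as a float with a 125-bit mantissa.
	f, _, err := big.ParseFloat(s, 10, 125, big.ToNearestEven)
	if err != nil {
		return nil, nil, err
	}
	return nil, f, nil
}

func main() {
	i, _, _ := parseEventNumber("10000000000000000000") // > MaxInt64, still exact
	_, f, _ := parseEventNumber("10000000000000000000.76")
	fmt.Println(i, f)
}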
@@ -404,7 +426,16 @@ func (idx *BlockerIndexer) match( for ; it.Valid(); it.Next() { keyHeight, err := parseHeightFromEventKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + if err != nil { + idx.log.Error("failure to parse height from key:", err) + continue + } + withinHeight, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + idx.log.Error("failure checking for height bounds:", err) + continue + } + if !withinHeight { continue } @@ -431,16 +462,28 @@ func (idx *BlockerIndexer) match( } defer it.Close() + LOOP_EXISTS: for ; it.Valid(); it.Next() { + keyHeight, err := parseHeightFromEventKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + if err != nil { + idx.log.Error("failure to parse height from key:", err) + continue + } + withinHeight, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + idx.log.Error("failure checking for height bounds:", err) + continue + } + if !withinHeight { continue } + idx.setTmpHeights(tmpHeights, it) select { case <-ctx.Done(): - break + break LOOP_EXISTS default: } @@ -462,6 +505,7 @@ func (idx *BlockerIndexer) match( } defer it.Close() + LOOP_CONTAINS: for ; it.Valid(); it.Next() { eventValue, err := parseValueFromEventKey(it.Key()) if err != nil { @@ -470,7 +514,16 @@ func (idx *BlockerIndexer) match( if strings.Contains(eventValue, c.Arg.Value()) { keyHeight, err := parseHeightFromEventKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + if err != nil { + idx.log.Error("failure to parse height from key:", err) + continue + } + withinHeight, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + idx.log.Error("failure checking for height bounds:", err) + continue + } + if !withinHeight { continue } idx.setTmpHeights(tmpHeights, it) @@ -478,7 +531,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break + break LOOP_CONTAINS default: } @@ -504,6 +557,7 @@ func (idx *BlockerIndexer) match( // Remove/reduce matches in filteredHeights that were not found in this // match (tmpHeights). 
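Most of the control-flow churn in these hunks is the same one-line bug: in Go, a bare break inside a select statement exits only the select, not the enclosing for loop, so the old cancellation checks never actually stopped iteration. A self-contained sketch of the fix pattern that the labels (FOR_LOOP, LOOP_EXISTS, LOOP_CONTAINS) implement:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // pretend the caller already gave up

	items := []int{1, 2, 3}

LOOP:
	for _, it := range items {
		select {
		case <-ctx.Done():
			// A bare `break` here would only leave the select, and the
			// loop would keep running; the label breaks the loop itself.
			break LOOP
		default:
		}
		fmt.Println("processing", it) // never reached: ctx is cancelled
	}
}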
+FOR_LOOP: for k, v := range filteredHeights { tmpHeight := tmpHeights[k] if tmpHeight == nil || !bytes.Equal(tmpHeight, v) { @@ -511,7 +565,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break + break FOR_LOOP default: } @@ -521,7 +575,7 @@ func (idx *BlockerIndexer) match( return filteredHeights, nil } -func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ string, height int64) error { +func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, height int64) error { heightBz := int64ToBytes(height) for _, event := range events { @@ -543,7 +597,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ } if attr.GetIndex() { - key, err := eventKey(compositeKey, typ, attr.Value, height, idx.eventSeq) + key, err := eventKey(compositeKey, attr.Value, height, idx.eventSeq) if err != nil { return fmt.Errorf("failed to create block index key: %w", err) } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index e28cf6ec02a..2f4e3f085ac 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -229,26 +229,14 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustCompile("block.height = 1"), results: []int64{1}, }, - "query return all events from a height - exact - no match.events": { - q: query.MustCompile("block.height = 1"), - results: []int64{1}, - }, "query return all events from a height - exact (deduplicate height)": { q: query.MustCompile("block.height = 1 AND block.height = 2"), results: []int64{1}, }, - "query return all events from a height - exact (deduplicate height) - no match.events": { - q: query.MustCompile("block.height = 1 AND block.height = 2"), - results: []int64{1}, - }, "query return all events from a height - range": { q: query.MustCompile("block.height < 2 AND block.height > 0 AND block.height > 0"), results: []int64{1}, }, - "query return all events from a height - range - no match.events": { - q: query.MustCompile("block.height < 2 AND block.height > 0 AND block.height > 0"), - results: []int64{1}, - }, "query return all events from a height - range 2": { q: query.MustCompile("block.height < 3 AND block.height < 2 AND block.height > 0 AND block.height > 0"), results: []int64{1}, @@ -261,10 +249,6 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustCompile("end_event.bar < 300 AND end_event.foo = 100 AND block.height > 0 AND block.height <= 2"), results: []int64{1, 2}, }, - "query matches fields from same event - no match.events": { - q: query.MustCompile("end_event.bar < 300 AND end_event.foo = 100 AND block.height > 0 AND block.height <= 2"), - results: []int64{1, 2}, - }, "query matches fields from multiple events": { q: query.MustCompile("end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2"), results: []int64{}, @@ -281,7 +265,7 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustCompile("block.height = 2 AND end_event.foo < 300"), results: []int64{2}, }, - "deduplication test - match.events multiple 2": { + "match attributes across events with height constraint": { q: query.MustCompile("end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2"), results: []int64{}, }, @@ -293,10 +277,6 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustCompile("end_event.bar > 100 AND end_event.bar <= 500"), results: []int64{1, 2}, }, - "query matches all fields from multiple events - no match.events": { - q: query.MustCompile("end_event.bar > 100 AND end_event.bar <= 500"), - 
results: []int64{1, 2}, - }, "query with height range and height equality - should ignore equality": { q: query.MustCompile("block.height = 2 AND end_event.foo >= 100 AND block.height < 2"), results: []int64{1}, @@ -320,3 +300,140 @@ func TestBlockIndexerMulti(t *testing.T) { }) } } + +func TestBigInt(t *testing.T) { + + bigInt := "10000000000000000000" + bigFloat := bigInt + ".76" + bigFloatLower := bigInt + ".1" + bigIntSmaller := "9999999999999999999" + store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + indexer := blockidxkv.New(store) + + require.NoError(t, indexer.Index(types.EventDataNewBlockEvents{ + Height: 1, + Events: []abci.Event{ + {}, + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: "100", + Index: true, + }, + { + Key: "bar", + Value: bigFloat, + Index: true, + }, + { + Key: "bar_lower", + Value: bigFloatLower, + Index: true, + }, + }, + }, + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: bigInt, + Index: true, + }, + { + Key: "bar", + Value: "500", + Index: true, + }, + { + Key: "bla", + Value: "500.5", + Index: true, + }, + }, + }, + }, + }, + )) + + testCases := map[string]struct { + q *query.Query + results []int64 + }{ + + "query return all events from a height - exact": { + q: query.MustCompile("block.height = 1"), + results: []int64{1}, + }, + "query return all events from a height - exact (deduplicate height)": { + q: query.MustCompile("block.height = 1 AND block.height = 2"), + results: []int64{1}, + }, + "query return all events from a height - range": { + q: query.MustCompile("block.height < 2 AND block.height > 0 AND block.height > 0"), + results: []int64{1}, + }, + "query matches fields with big int and height - no match": { + q: query.MustCompile("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int with less and height - no match": { + q: query.MustCompile("end_event.foo <= " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int and height - match": { + q: query.MustCompile("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 1"), + results: []int64{1}, + }, + "query matches big int in range": { + q: query.MustCompile("end_event.foo = " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with equality ": { + q: query.MustCompile("end_event.bar >= " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float ": { + q: query.MustCompile("end_event.bar > " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float lower dec point ": { + q: query.MustCompile("end_event.bar_lower > " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with less - found": { + q: query.MustCompile("end_event.foo <= " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with less with height range - found": { + q: query.MustCompile("end_event.foo <= " + bigInt + " AND block.height > 0"), + results: []int64{1}, + }, + "query matches big int in range with float with less - not found": { + q: query.MustCompile("end_event.foo < " + bigInt + " AND end_event.foo > 100"), + results: []int64{}, + }, + "query does not parse float": { + q: query.MustCompile("end_event.bla >= 500"), + results: []int64{1}, + }, + "query condition float": { + q: query.MustCompile("end_event.bla < " + 
bigFloat), + results: []int64{1}, + }, + "query condition big int plus one": { + q: query.MustCompile("end_event.foo > " + bigIntSmaller), + results: []int64{1}, + }, + } + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + results, err := indexer.Search(context.Background(), tc.q) + require.NoError(t, err) + require.Equal(t, tc.results, results) + }) + } +} diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 9ccb84720fa..a20a0fd81e7 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -3,10 +3,12 @@ package kv import ( "encoding/binary" "fmt" + "math/big" "strconv" "github.com/google/orderedcode" + idxutil "github.com/cometbft/cometbft/internal/indexer" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/types" @@ -49,13 +51,12 @@ func heightKey(height int64) ([]byte, error) { ) } -func eventKey(compositeKey, typ, eventValue string, height int64, eventSeq int64) ([]byte, error) { +func eventKey(compositeKey, eventValue string, height int64, eventSeq int64) ([]byte, error) { return orderedcode.Append( nil, compositeKey, eventValue, height, - typ, eventSeq, ) } @@ -80,11 +81,11 @@ func parseValueFromPrimaryKey(key []byte) (string, error) { func parseValueFromEventKey(key []byte) (string, error) { var ( - compositeKey, typ, eventValue string - height int64 + compositeKey, eventValue string + height int64 ) - _, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ) + _, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height) if err != nil { return "", fmt.Errorf("failed to parse event key: %w", err) } @@ -94,11 +95,11 @@ func parseValueFromEventKey(key []byte) (string, error) { func parseHeightFromEventKey(key []byte) (int64, error) { var ( - compositeKey, typ, eventValue string - height int64 + compositeKey, eventValue string + height int64 ) - _, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ) + _, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height) if err != nil { return -1, fmt.Errorf("failed to parse event key: %w", err) } @@ -108,27 +109,41 @@ func parseHeightFromEventKey(key []byte) (int64, error) { func parseEventSeqFromEventKey(key []byte) (int64, error) { var ( - compositeKey, typ, eventValue string - height int64 - eventSeq int64 + compositeKey, eventValue string + height int64 + eventSeq int64 ) - remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &typ) + remaining, err := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height) if err != nil { - return 0, fmt.Errorf("failed to parse event key: %w", err) + return 0, fmt.Errorf("failed to parse event sequence: %w", err) } - // This is done to support previous versions that did not have event sequence in their key - if len(remaining) != 0 { - remaining, err = orderedcode.Parse(remaining, &eventSeq) - if err != nil { - return 0, fmt.Errorf("failed to parse event key: %w", err) + // We either have an event sequence or a function type (potentially) followed by an event sequence. + // Potential scenarios: + // 1. Events indexed with v0.38.x and later, will only have an event sequence + // 2. Events indexed between v0.34.27 and v0.37.x will have a function type and an event sequence + // 3. 
Events indexed before v0.34.27 will only have a function type + // function_type = 'begin_block_event' | 'end_block_event' + + if len(remaining) == 0 { // The event was not properly indexed + return 0, fmt.Errorf("failed to parse event sequence: invalid event format") + } + var typ string + remaining2, err := orderedcode.Parse(remaining, &typ) // Check if we have scenarios 2. or 3. (described above). + if err != nil { // If the function type cannot be parsed, we are likely in scenario 1. + remaining, err2 := orderedcode.Parse(string(key), &compositeKey, &eventValue, &height, &eventSeq) + if err2 != nil || len(remaining) != 0 { // We should not have anything else after the eventSeq. + return 0, fmt.Errorf("failed to parse event sequence: %w; and %w", err, err2) } - if len(remaining) != 0 { - return 0, fmt.Errorf("unexpected remainder in key: %s", remaining) + } else if len(remaining2) != 0 { // We are in scenario 2: a function type followed by an event sequence. + remaining, err2 := orderedcode.Parse(remaining2, &eventSeq) // Retrieve the eventSeq that comes after the + // function type; this should not fail. + if err2 != nil || len(remaining) != 0 { // We should not have anything else after the eventSeq in scenario 2. + return 0, fmt.Errorf("failed to parse event sequence: %w", err2) } } - return eventSeq, nil } @@ -147,10 +162,12 @@ func dedupHeight(conditions []syntax.Condition) (dedupConditions []syntax.Condit if c.Op == syntax.TEq { if found || heightRangeExists { continue - } else { + } + hFloat := c.Arg.Number() + if hFloat != nil { + h, _ := hFloat.Int64() + heightInfo.height = h heightCondition = append(heightCondition, c) - heightInfo.height = int64(c.Arg.Number()) - found = true } } else { @@ -179,15 +196,16 @@ func dedupHeight(conditions []syntax.Condition) (dedupConditions []syntax.Condit return dedupConditions, heightInfo, found } -func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { +func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) (bool, error) { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { - return false + withinBounds, err := idxutil.CheckBounds(heightInfo.heightRange, big.NewInt(keyHeight)) + if err != nil || !withinBounds { + return false, err } } else { if heightInfo.height != 0 && keyHeight != heightInfo.height { - return false + return false, nil } } - return true + return true, nil } diff --git a/state/indexer/block/null/null.go b/state/indexer/block/null/null.go index 2af842c74a8..0a62a4273e0 100644 --- a/state/indexer/block/null/null.go +++ b/state/indexer/block/null/null.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/types" @@ -14,7 +15,7 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // TxIndex implements a no-op block indexer.
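The three key layouts that parseEventSeqFromEventKey has to disambiguate above are easier to see when constructed explicitly. A sketch using the same orderedcode Append/Parse API the indexer imports; the composite keys, values, and heights are illustrative:

package main

import (
	"fmt"

	"github.com/google/orderedcode"
)

func main() {
	// Scenario 1 (v0.38.x and later): composite key, value, height, eventSeq.
	k1, _ := orderedcode.Append(nil, "end_event.foo", "100", int64(5), int64(0))

	// Scenario 2 (v0.34.27 through v0.37.x): a function type precedes the eventSeq.
	k2, _ := orderedcode.Append(nil, "end_event.foo", "100", int64(5), "end_block_event", int64(0))

	// Parsing the shared prefix leaves either the eventSeq (scenario 1) or
	// the function type plus eventSeq (scenario 2) in the remainder, which
	// is exactly what the parser above inspects.
	var key, val string
	var height int64
	rem1, err1 := orderedcode.Parse(string(k1), &key, &val, &height)
	rem2, err2 := orderedcode.Parse(string(k2), &key, &val, &height)
	fmt.Println(len(rem1), err1, len(rem2), err2)
}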
type BlockerIndexer struct{} -func (idx *BlockerIndexer) Has(height int64) (bool, error) { +func (idx *BlockerIndexer) Has(int64) (bool, error) { return false, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } @@ -22,6 +23,9 @@ func (idx *BlockerIndexer) Index(types.EventDataNewBlockEvents) error { return nil } -func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { +func (idx *BlockerIndexer) Search(context.Context, *query.Query) ([]int64, error) { return []int64{}, nil } + +func (idx *BlockerIndexer) SetLogger(log.Logger) { +} diff --git a/state/indexer/mocks/block_indexer.go b/state/indexer/mocks/block_indexer.go index c17a6532247..04144883e74 100644 --- a/state/indexer/mocks/block_indexer.go +++ b/state/indexer/mocks/block_indexer.go @@ -5,6 +5,8 @@ package mocks import ( context "context" + log "github.com/cometbft/cometbft/libs/log" + mock "github.com/stretchr/testify/mock" query "github.com/cometbft/cometbft/libs/pubsub/query" @@ -21,6 +23,10 @@ type BlockIndexer struct { func (_m *BlockIndexer) Has(height int64) (bool, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(int64) (bool, error)); ok { @@ -45,6 +51,10 @@ func (_m *BlockIndexer) Has(height int64) (bool, error) { func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockEvents) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataNewBlockEvents) error); ok { r0 = rf(_a0) @@ -59,6 +69,10 @@ func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockEvents) error { func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { ret := _m.Called(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Search") + } + var r0 []int64 var r1 error if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]int64, error)); ok { @@ -81,13 +95,17 @@ func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, er return r0, r1 } -type mockConstructorTestingTNewBlockIndexer interface { - mock.TestingT - Cleanup(func()) +// SetLogger provides a mock function with given fields: l +func (_m *BlockIndexer) SetLogger(l log.Logger) { + _m.Called(l) } // NewBlockIndexer creates a new instance of BlockIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockIndexer(t mockConstructorTestingTNewBlockIndexer) *BlockIndexer { +// The first argument is typically a *testing.T value. +func NewBlockIndexer(t interface { + mock.TestingT + Cleanup(func()) +}) *BlockIndexer { mock := &BlockIndexer{} mock.Mock.Test(t) diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index e3cfdc6fda0..eb85b9bfee8 100644 --- a/state/indexer/query_range.go +++ b/state/indexer/query_range.go @@ -1,6 +1,7 @@ package indexer import ( + "math/big" "time" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" @@ -44,7 +45,17 @@ func (qr QueryRange) LowerBoundValue() interface{} { switch t := qr.LowerBound.(type) { case int64: return t + 1 - + case *big.Int: + tmp := new(big.Int) + return tmp.Add(t, big.NewInt(1)) + + case *big.Float: + // For floats we cannot simply add one as the float to float + // comparison is more finegrained. 
+ // When comparing against integers, adding one is also incorrect. + // Example: for the condition x > 100.2 with x = 101, raising the bound + // to 101.2 would wrongly exclude x, so the float bound is returned unchanged. + return t case time.Time: return t.Unix() + 1 @@ -67,7 +78,11 @@ func (qr QueryRange) UpperBoundValue() interface{} { switch t := qr.UpperBound.(type) { case int64: return t - 1 - + case *big.Int: + tmp := new(big.Int) + return tmp.Sub(t, big.NewInt(1)) + case *big.Float: + return t case time.Time: return t.Unix() - 1 @@ -81,14 +96,13 @@ func LookForRangesWithHeight(conditions []syntax.Condition) (queryRange QueryRanges, indexes []int, heightRange QueryRange) { queryRange = make(QueryRanges) for i, c := range conditions { - heightKey := false if IsRangeOperation(c.Op) { + heightKey := c.Tag == types.BlockHeightKey || c.Tag == types.TxHeightKey r, ok := queryRange[c.Tag] if !ok { r = QueryRange{Key: c.Tag} if c.Tag == types.BlockHeightKey || c.Tag == types.TxHeightKey { heightRange = QueryRange{Key: c.Tag} - heightKey = true } } @@ -182,7 +196,7 @@ func conditionArg(c syntax.Condition) interface{} { } switch c.Arg.Type { case syntax.TNumber: - return int64(c.Arg.Number()) + return c.Arg.Number() case syntax.TTime, syntax.TDate: return c.Arg.Time() default: diff --git a/state/indexer/sink/psql/backport.go b/state/indexer/sink/psql/backport.go index 429cf03d310..81a59ce1ffb 100644 --- a/state/indexer/sink/psql/backport.go +++ b/state/indexer/sink/psql/backport.go @@ -17,6 +17,8 @@ import ( "context" "errors" + "github.com/cometbft/cometbft/libs/log" + abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/txindex" @@ -58,6 +60,8 @@ func (BackportTxIndexer) Search(context.Context, *query.Query) ([]*abci.TxResult return nil, errors.New("the TxIndexer.Search method is not supported") } +func (BackportTxIndexer) SetLogger(log.Logger) {} + // BlockIndexer returns a bridge that implements the CometBFT v0.34 block // indexer interface, using the Postgres event sink as a backing store. func (es *EventSink) BlockIndexer() BackportBlockIndexer { @@ -70,7 +74,7 @@ type BackportBlockIndexer struct{ psql *EventSink } // Has is implemented to satisfy the BlockIndexer interface, but it is not // supported by the psql event sink and reports an error for all inputs.
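The asymmetry in the QueryRange hunk above is worth spelling out: an exclusive integer bound can be converted to an inclusive one by stepping to the next representable value, but there is no usable "next value" for an arbitrary float, so the float bound must be returned unchanged and compared exclusively. A tiny illustration of the integer rewrite and the float counterexample:

package main

import "fmt"

func main() {
	// Exclusive integer bound: x > 100 is equivalent to x >= 101.
	var lower int64 = 100
	inclusive := lower + 1
	fmt.Println("x >", lower, "==", "x >=", inclusive)

	// No such rewrite for floats: x > 100.2 is NOT x >= 101.2;
	// x = 101 satisfies the first condition but not the second.
	x := 101.0
	fmt.Println(x > 100.2, x >= 101.2) // true false
}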
-func (BackportBlockIndexer) Has(height int64) (bool, error) { +func (BackportBlockIndexer) Has(_ int64) (bool, error) { return false, errors.New("the BlockIndexer.Has method is not supported") } @@ -85,3 +89,5 @@ func (b BackportBlockIndexer) Index(block types.EventDataNewBlockEvents) error { func (BackportBlockIndexer) Search(context.Context, *query.Query) ([]int64, error) { return nil, errors.New("the BlockIndexer.Search method is not supported") } + +func (BackportBlockIndexer) SetLogger(log.Logger) {} diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index 79c641b0790..e383c7aa289 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -90,6 +90,18 @@ func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { txIDArg = txID } + const ( + insertEventQuery = ` + INSERT INTO ` + tableEvents + ` (block_id, tx_id, type) + VALUES ($1, $2, $3) + RETURNING rowid; + ` + insertAttributeQuery = ` + INSERT INTO ` + tableAttributes + ` (event_id, key, composite_key, value) + VALUES ($1, $2, $3, $4); + ` + ) + // Add each event to the events table, and retrieve its row ID to use when // adding any attributes the event provides. for _, evt := range evts { @@ -98,10 +110,7 @@ func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { continue } - eid, err := queryWithID(dbtx, ` -INSERT INTO `+tableEvents+` (block_id, tx_id, type) VALUES ($1, $2, $3) - RETURNING rowid; -`, blockID, txIDArg, evt.Type) + eid, err := queryWithID(dbtx, insertEventQuery, blockID, txIDArg, evt.Type) if err != nil { return err } @@ -112,10 +121,7 @@ INSERT INTO `+tableEvents+` (block_id, tx_id, type) VALUES ($1, $2, $3) continue } compositeKey := evt.Type + "." + attr.Key - if _, err := dbtx.Exec(` -INSERT INTO `+tableAttributes+` (event_id, key, composite_key, value) - VALUES ($1, $2, $3, $4); -`, eid, attr.Key, compositeKey, attr.Value); err != nil { + if _, err := dbtx.Exec(insertAttributeQuery, eid, attr.Key, compositeKey, attr.Value); err != nil { return err } } @@ -219,7 +225,6 @@ INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) return fmt.Errorf("indexing transaction events: %w", err) } return nil - }); err != nil { return err } @@ -228,22 +233,22 @@ INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) } // SearchBlockEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { +func (es *EventSink) SearchBlockEvents(_ context.Context, _ *query.Query) ([]int64, error) { return nil, errors.New("block search is not supported via the postgres event sink") } // SearchTxEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (es *EventSink) SearchTxEvents(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { return nil, errors.New("tx search is not supported via the postgres event sink") } // GetTxByHash is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { +func (es *EventSink) GetTxByHash(_ []byte) (*abci.TxResult, error) { return nil, errors.New("getTxByHash is not supported via the postgres event sink") } // HasBlock is not implemented by this sink, and reports an error for all queries. 
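The psql refactor a few hunks above only lifts the SQL text into named constants; the insert-and-return-ID shape stays the same. For reference, a minimal sketch of that shape with database/sql, roughly what the sink's queryWithID helper does; the table name and helper here are hypothetical stand-ins, not the sink's real schema:

package sketch

import (
	"database/sql"
)

// insertEvent writes one row and returns its generated rowid, using the
// same INSERT ... RETURNING pattern as the event sink.
func insertEvent(tx *sql.Tx, blockID uint32, typ string) (uint32, error) {
	const q = `
	INSERT INTO events (block_id, type)
	VALUES ($1, $2)
	RETURNING rowid;
	`
	var id uint32
	// QueryRow + Scan fetches the RETURNING value in one round trip,
	// inside the caller's transaction.
	if err := tx.QueryRow(q, blockID, typ).Scan(&id); err != nil {
		return 0, err
	}
	return id, nil
}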
-func (es *EventSink) HasBlock(h int64) (bool, error) { +func (es *EventSink) HasBlock(_ int64) (bool, error) { return false, errors.New("hasBlock is not supported via the postgres event sink") } diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index d7467eeb708..daae2d09fc7 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -18,6 +18,10 @@ type BlockStore struct { func (_m *BlockStore) Base() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Base") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -32,6 +36,10 @@ func (_m *BlockStore) Base() int64 { func (_m *BlockStore) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -46,6 +54,10 @@ func (_m *BlockStore) Close() error { func (_m *BlockStore) DeleteLatestBlock() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DeleteLatestBlock") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -60,6 +72,10 @@ func (_m *BlockStore) DeleteLatestBlock() error { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -74,6 +90,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LoadBaseMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { r0 = rf() @@ -90,6 +110,10 @@ func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { func (_m *BlockStore) LoadBlock(height int64) *types.Block { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlock") + } + var r0 *types.Block if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { r0 = rf(height) @@ -106,6 +130,10 @@ func (_m *BlockStore) LoadBlock(height int64) *types.Block { func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockByHash") + } + var r0 *types.Block if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { r0 = rf(hash) @@ -122,6 +150,10 @@ func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -138,6 +170,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockExtendedCommit") + } + var r0 *types.ExtendedCommit if rf, ok := ret.Get(0).(func(int64) *types.ExtendedCommit); ok { r0 = rf(height) @@ -154,6 +190,10 @@ func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommi func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) @@ -170,6 
+210,10 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMetaByHash") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); ok { r0 = rf(hash) @@ -186,6 +230,10 @@ func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { ret := _m.Called(height, index) + if len(ret) == 0 { + panic("no return value specified for LoadBlockPart") + } + var r0 *types.Part if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { r0 = rf(height, index) @@ -202,6 +250,10 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadSeenCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -218,6 +270,10 @@ func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { func (_m *BlockStore) PruneBlocks(height int64, _a1 state.State) (uint64, int64, error) { ret := _m.Called(height, _a1) + if len(ret) == 0 { + panic("no return value specified for PruneBlocks") + } + var r0 uint64 var r1 int64 var r2 error @@ -259,6 +315,10 @@ func (_m *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts func (_m *BlockStore) Size() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -269,13 +329,12 @@ func (_m *BlockStore) Size() int64 { return r0 } -type mockConstructorTestingTNewBlockStore interface { +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { +}) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index b5a3a8ac14c..479b125ccdd 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -18,6 +18,10 @@ type EvidencePool struct { func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for AddEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { r0 = rf(_a0) @@ -32,6 +36,10 @@ func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for CheckEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { r0 = rf(_a0) @@ -46,6 +54,10 @@ func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { ret := _m.Called(maxBytes) + if len(ret) == 0 { + panic("no return value specified for PendingEvidence") + } + var r0 []types.Evidence var r1 int64 if rf, ok := ret.Get(0).(func(int64) ([]types.Evidence, int64)); ok { @@ -73,13 +85,12 @@ func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { _m.Called(_a0, _a1) } -type mockConstructorTestingTNewEvidencePool interface { +// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEvidencePool(t interface { mock.TestingT Cleanup(func()) -} - -// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool { +}) *EvidencePool { mock := &EvidencePool{} mock.Mock.Test(t) diff --git a/state/mocks/store.go b/state/mocks/store.go index 8e8d1e35fee..49eb0cfcded 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -20,6 +20,10 @@ type Store struct { func (_m *Store) Bootstrap(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Bootstrap") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -34,6 +38,10 @@ func (_m *Store) Bootstrap(_a0 state.State) error { func (_m *Store) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -44,10 +52,42 @@ func (_m *Store) Close() error { return r0 } +// GetOfflineStateSyncHeight provides a mock function with given fields: +func (_m *Store) GetOfflineStateSyncHeight() (int64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetOfflineStateSyncHeight") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func() (int64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Load provides a mock function with given fields: func (_m *Store) Load() (state.State, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Load") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func() (state.State, error)); ok { @@ -72,6 +112,10 @@ func (_m *Store) Load() (state.State, error) { func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadConsensusParams") + } + var r0 types.ConsensusParams var r1 error if rf, ok := ret.Get(0).(func(int64) (types.ConsensusParams, error)); ok { @@ -96,6 +140,10 @@ func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { func (_m *Store) LoadFinalizeBlockResponse(_a0 int64) (*abcitypes.ResponseFinalizeBlock, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadFinalizeBlockResponse") + } + var r0 *abcitypes.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(int64) (*abcitypes.ResponseFinalizeBlock, error)); ok { @@ -122,6 +170,10 @@ func (_m *Store) LoadFinalizeBlockResponse(_a0 int64) (*abcitypes.ResponseFinali func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *types.GenesisDoc) (state.State, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadFromDBOrGenesisDoc") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(*types.GenesisDoc) (state.State, error)); ok { @@ -146,6 +198,10 @@ func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *types.GenesisDoc) (state.State, err func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadFromDBOrGenesisFile") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(string) (state.State, error)); ok { @@ -170,6 +226,10 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { func (_m *Store) LoadLastFinalizeBlockResponse(_a0 int64) 
(*abcitypes.ResponseFinalizeBlock, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadLastFinalizeBlockResponse") + } + var r0 *abcitypes.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(int64) (*abcitypes.ResponseFinalizeBlock, error)); ok { @@ -196,6 +256,10 @@ func (_m *Store) LoadLastFinalizeBlockResponse(_a0 int64) (*abcitypes.ResponseFi func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadValidators") + } + var r0 *types.ValidatorSet var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.ValidatorSet, error)); ok { @@ -222,6 +286,10 @@ func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) { func (_m *Store) PruneStates(_a0 int64, _a1 int64, _a2 int64) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for PruneStates") + } + var r0 error if rf, ok := ret.Get(0).(func(int64, int64, int64) error); ok { r0 = rf(_a0, _a1, _a2) @@ -236,6 +304,10 @@ func (_m *Store) PruneStates(_a0 int64, _a1 int64, _a2 int64) error { func (_m *Store) Save(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Save") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -250,6 +322,10 @@ func (_m *Store) Save(_a0 state.State) error { func (_m *Store) SaveFinalizeBlockResponse(_a0 int64, _a1 *abcitypes.ResponseFinalizeBlock) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SaveFinalizeBlockResponse") + } + var r0 error if rf, ok := ret.Get(0).(func(int64, *abcitypes.ResponseFinalizeBlock) error); ok { r0 = rf(_a0, _a1) @@ -260,13 +336,30 @@ func (_m *Store) SaveFinalizeBlockResponse(_a0 int64, _a1 *abcitypes.ResponseFin return r0 } -type mockConstructorTestingTNewStore interface { - mock.TestingT - Cleanup(func()) +// SetOfflineStateSyncHeight provides a mock function with given fields: height +func (_m *Store) SetOfflineStateSyncHeight(height int64) error { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for SetOfflineStateSyncHeight") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64) error); ok { + r0 = rf(height) + } else { + r0 = ret.Error(0) + } + + return r0 } // NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStore(t mockConstructorTestingTNewStore) *Store { +// The first argument is typically a *testing.T value. 
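These regenerated mocks change two things callers can observe: the constructors take an anonymous interface instead of a named one, and un-stubbed methods now panic with "no return value specified" rather than silently returning zero values. A sketch of how a test would stub the new offline-height method, assuming standard mockery/testify usage:

package state_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/cometbft/cometbft/state/mocks"
)

func TestOfflineHeightStub(t *testing.T) {
	// NewStore registers a cleanup that asserts the mock's expectations.
	m := mocks.NewStore(t)

	// Without this stub, calling GetOfflineStateSyncHeight would now panic
	// with "no return value specified" instead of returning 0, nil.
	m.On("GetOfflineStateSyncHeight").Return(int64(42), nil)

	h, err := m.GetOfflineStateSyncHeight()
	require.NoError(t, err)
	require.Equal(t, int64(42), h)
}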
+func NewStore(t interface { + mock.TestingT + Cleanup(func()) +}) *Store { mock := &Store{} mock.Mock.Test(t) diff --git a/state/rollback.go b/state/rollback.go index 6420192cf72..57ea0396820 100644 --- a/state/rollback.go +++ b/state/rollback.go @@ -65,10 +65,11 @@ func Rollback(bs BlockStore, ss Store, removeBlock bool) (int64, []byte, error) return -1, nil, err } + nextHeight := rollbackHeight + 1 valChangeHeight := invalidState.LastHeightValidatorsChanged // this can only happen if the validator set changed since the last block - if valChangeHeight > rollbackHeight { - valChangeHeight = rollbackHeight + 1 + if valChangeHeight > nextHeight+1 { + valChangeHeight = nextHeight + 1 } paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged diff --git a/state/rollback_test.go b/state/rollback_test.go index 9495cb4649f..46f1f94f78e 100644 --- a/state/rollback_test.go +++ b/state/rollback_test.go @@ -263,7 +263,7 @@ func setupStateStore(t *testing.T, height int64) state.Store { LastValidators: valSet, Validators: valSet.CopyIncrementProposerPriority(1), NextValidators: valSet.CopyIncrementProposerPriority(2), - LastHeightValidatorsChanged: height + 1, + LastHeightValidatorsChanged: height + 1 + 1, ConsensusParams: *params, LastHeightConsensusParamsChanged: height + 1, } diff --git a/state/services.go b/state/services.go index b1506e9efb4..280a945668f 100644 --- a/state/services.go +++ b/state/services.go @@ -59,10 +59,10 @@ type EvidencePool interface { // to the consensus evidence pool interface type EmptyEvidencePool struct{} -func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { +func (EmptyEvidencePool) PendingEvidence(int64) (ev []types.Evidence, size int64) { return nil, 0 } func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } func (EmptyEvidencePool) Update(State, types.EvidenceList) {} -func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } -func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *types.Vote) {} +func (EmptyEvidencePool) CheckEvidence(types.EvidenceList) error { return nil } +func (EmptyEvidencePool) ReportConflictingVotes(*types.Vote, *types.Vote) {} diff --git a/state/state_test.go b/state/state_test.go index 2e6be6ea915..863395dca32 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -24,6 +24,14 @@ import ( // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { + t.Helper() + tearDown, stateDB, state, _ := setupTestCaseWithStore(t) + return tearDown, stateDB, state +} + +// setupTestCase does setup common to all test cases. 
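The rollback hunk above is an off-by-one fix worth spelling out: after rolling back to height H, the next block to be produced is H+1, and since validator-set updates delivered at one height only take effect one block later, the earliest admissible LastHeightValidatorsChanged is nextHeight+1 rather than nextHeight (the same shift the test change makes with height + 1 + 1). A worked instance of the clamp, under that one-block-delay reading:

package main

import "fmt"

func main() {
	var rollbackHeight int64 = 10 // height the state is rolled back to
	nextHeight := rollbackHeight + 1

	// Suppose the invalid state recorded a validator change far in the
	// future; it is clamped to the earliest height a post-rollback block
	// could change the set, i.e. nextHeight + 1.
	valChangeHeight := int64(99)
	if valChangeHeight > nextHeight+1 {
		valChangeHeight = nextHeight + 1
	}
	fmt.Println(valChangeHeight) // 12
}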
+func setupTestCaseWithStore(t *testing.T) (func(t *testing.T), dbm.DB, sm.State, sm.Store) { + t.Helper() config := test.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) @@ -32,13 +40,16 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { }) require.NoError(t, err) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) - assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") + require.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } + tearDown := func(t *testing.T) { + t.Helper() + os.RemoveAll(config.RootDir) + } - return tearDown, stateDB, state + return tearDown, stateDB, state, stateStore } // TestStateCopy tests the correct copying behavior of State. @@ -117,6 +128,8 @@ func TestFinalizeBlockResponsesSaveLoad1(t *testing.T) { types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), } + abciResponses.AppHash = make([]byte, 1) + err := stateStore.SaveFinalizeBlockResponse(block.Height, abciResponses) require.NoError(t, err) loadedABCIResponses, err := stateStore.LoadFinalizeBlockResponse(block.Height) @@ -277,7 +290,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ power++ } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) @@ -953,7 +966,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() // Swap the first validator with a new one (validator set size stays the same). - header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(t, state, pubkey) + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. 
var validatorUpdates []*types.Validator @@ -1036,7 +1049,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, blockID, responses := makeHeaderPartsResponsesParams(t, state, cp.ToProto()) + header, blockID, responses := makeHeaderPartsResponsesParams(state, cp.ToProto()) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) diff --git a/state/store.go b/state/store.go index 5b0dee2afd6..4d64db1f040 100644 --- a/state/store.go +++ b/state/store.go @@ -1,6 +1,7 @@ package state import ( + "encoding/binary" "errors" "fmt" @@ -41,6 +42,7 @@ func calcABCIResponsesKey(height int64) []byte { //---------------------- var lastABCIResponseKey = []byte("lastABCIResponseKey") +var offlineStateSyncHeight = []byte("offlineStateSyncHeightKey") //go:generate ../scripts/mockery_generate.sh Store @@ -61,7 +63,7 @@ type Store interface { LoadValidators(int64) (*types.ValidatorSet, error) // LoadFinalizeBlockResponse loads the abciResponse for a given height LoadFinalizeBlockResponse(int64) (*abci.ResponseFinalizeBlock, error) - // LoadLastABCIResponse loads the last abciResponse for a given height + // LoadLastFinalizeBlockResponse loads the last abciResponse for a given height LoadLastFinalizeBlockResponse(int64) (*abci.ResponseFinalizeBlock, error) // LoadConsensusParams loads the consensus params for a given height LoadConsensusParams(int64) (types.ConsensusParams, error) @@ -73,6 +75,10 @@ type Store interface { Bootstrap(State) error // PruneStates takes the height from which to start pruning and which height stop at PruneStates(int64, int64, int64) error + // Saves the height at which the store is bootstrapped after out of band statesync + SetOfflineStateSyncHeight(height int64) error + // Gets the height at which the store is bootstrapped after out of band statesync + GetOfflineStateSyncHeight() (int64, error) // Close closes the connection with the database Close() error } @@ -94,6 +100,14 @@ type StoreOptions struct { var _ Store = (*dbStore)(nil) +func IsEmpty(store dbStore) (bool, error) { + state, err := store.Load() + if err != nil { + return false, err + } + return state.IsEmpty(), nil +} + // NewStore creates the dbStore of the state pkg. func NewStore(db dbm.DB, options StoreOptions) Store { return dbStore{db, options} @@ -173,61 +187,83 @@ func (store dbStore) Save(state State) error { } func (store dbStore) save(state State, key []byte) error { + batch := store.db.NewBatch() + defer func(batch dbm.Batch) { + err := batch.Close() + if err != nil { + panic(err) + } + }(batch) nextHeight := state.LastBlockHeight + 1 // If first block, save validators for the block. if nextHeight == 1 { nextHeight = state.InitialHeight // This extra logic due to validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. - if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators); err != nil { + if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators, batch); err != nil { return err } } // Save next validators. - if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators, batch); err != nil { return err } - // Save next consensus params. 
if err := store.saveConsensusParamsInfo(nextHeight, - state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + state.LastHeightConsensusParamsChanged, state.ConsensusParams, batch); err != nil { return err } - err := store.db.SetSync(key, state.Bytes()) - if err != nil { + if err := batch.Set(key, state.Bytes()); err != nil { return err } - + if err := batch.WriteSync(); err != nil { + panic(err) + } return nil } // Bootstrap saves a new state, used e.g. by state sync when starting from a non-zero height. func (store dbStore) Bootstrap(state State) error { + batch := store.db.NewBatch() + defer func(batch dbm.Batch) { + err := batch.Close() + if err != nil { + panic(err) + } + }(batch) height := state.LastBlockHeight + 1 if height == 1 { height = state.InitialHeight } if height > 1 && !state.LastValidators.IsNilOrEmpty() { - if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators); err != nil { + if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators, batch); err != nil { return err } } - if err := store.saveValidatorsInfo(height, height, state.Validators); err != nil { + if err := store.saveValidatorsInfo(height, height, state.Validators, batch); err != nil { return err } - if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators); err != nil { + if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators, batch); err != nil { return err } if err := store.saveConsensusParamsInfo(height, - state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + state.LastHeightConsensusParamsChanged, state.ConsensusParams, batch); err != nil { return err } - return store.db.SetSync(stateKey, state.Bytes()) + if err := batch.Set(stateKey, state.Bytes()); err != nil { + return err + } + + if err := batch.WriteSync(); err != nil { + panic(err) + } + + return batch.Close() } // PruneStates deletes states between the given heights (including from, excluding to). It is not @@ -394,15 +430,21 @@ func (store dbStore) LoadFinalizeBlockResponse(height int64) (*abci.ResponseFina resp := new(abci.ResponseFinalizeBlock) err = resp.Unmarshal(buf) - if err != nil { + // Check for an unmarshalling error, or for a nil resp.AppHash: either one + // means the bytes should instead be unmarshalled as LegacyABCIResponses. + // Depending on the content of the source message (serialized as ABCIResponses), + // there are instances where it deserializes as a FinalizeBlockResponse without + // causing an error, but the values are not deserialized properly: the result + // contains zero values, one of which is AppHash == nil. + // This can be verified in the /state/compatibility_test.go file + if err != nil || resp.AppHash == nil { // The data might be of the legacy ABCI response type, so // we try to unmarshal that legacyResp := new(cmtstate.LegacyABCIResponses) - rerr := legacyResp.Unmarshal(buf) - if rerr != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmtos.Exit(fmt.Sprintf(`LoadFinalizeBlockResponse: Data has been corrupted or its spec has - changed: %v\n`, err)) + if err := legacyResp.Unmarshal(buf); err != nil { + // Only return an error: this method is invoked via `/block_results` and + // some tests, not by state logic, so there is no need to exit cometbft; just return it. + return nil, ErrABCIResponseCorruptedOrSpecChangeForHeight{Height: height, Err: err} } // The state store contains the old format. Migrate to // the new ResponseFinalizeBlock format.
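In outline, the fallback decoding above reduces to the following sketch (the helper name is hypothetical; assumes the imports of `state/store.go`):

```go
// A pre-v0.38 LegacyABCIResponses payload often unmarshals into
// ResponseFinalizeBlock without a proto error but with zero values, so a nil
// AppHash doubles as the "this is legacy data" signal.
func decodeFinalizeBlockResponse(buf []byte) (*abci.ResponseFinalizeBlock, error) {
	resp := new(abci.ResponseFinalizeBlock)
	if err := resp.Unmarshal(buf); err == nil && resp.AppHash != nil {
		return resp, nil // new format decoded cleanly
	}
	legacy := new(cmtstate.LegacyABCIResponses)
	if err := legacy.Unmarshal(buf); err != nil {
		return nil, err // neither format: corrupted data or a spec change
	}
	return responseFinalizeBlockFromLegacy(legacy), nil
}
```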
Note that the @@ -415,7 +457,7 @@ func (store dbStore) LoadFinalizeBlockResponse(height int64) (*abci.ResponseFina return resp, nil } -// LoadLastFinalizeBlockResponses loads the FinalizeBlockResponses from the most recent height. +// LoadLastFinalizeBlockResponse loads the FinalizeBlockResponses from the most recent height. // The height parameter is used to ensure that the response corresponds to the latest height. // If not, an error is returned. // @@ -576,7 +618,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*cmtstate.ValidatorsInfo, erro // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted. -func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet) error { +func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet, batch dbm.Batch) error { if lastHeightChanged > height { return errors.New("lastHeightChanged cannot be greater than ValidatorsInfo height") } @@ -598,7 +640,7 @@ func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet return err } - err = store.db.Set(calcValidatorsKey(height), bz) + err = batch.Set(calcValidatorsKey(height), bz) if err != nil { return err } @@ -662,7 +704,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*cmtstate.ConsensusP // It should be called from s.Save(), right before the state itself is persisted. // If the consensus params did not change after processing the latest block, // only the last height for which they changed is persisted. -func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params types.ConsensusParams) error { +func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params types.ConsensusParams, batch dbm.Batch) error { paramsInfo := &cmtstate.ConsensusParamsInfo{ LastHeightChanged: changeHeight, } @@ -675,7 +717,7 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par return err } - err = store.db.Set(calcConsensusParamsKey(nextHeight), bz) + err = batch.Set(calcConsensusParamsKey(nextHeight), bz) if err != nil { return err } @@ -683,6 +725,34 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par return nil } +// SetOfflineStateSyncHeight saves the height at which the store was bootstrapped after an out-of-band state sync +func (store dbStore) SetOfflineStateSyncHeight(height int64) error { + err := store.db.SetSync(offlineStateSyncHeight, int64ToBytes(height)) + if err != nil { + return err + } + return nil +} + +// GetOfflineStateSyncHeight returns the height at which the store was bootstrapped after an out-of-band state sync +func (store dbStore) GetOfflineStateSyncHeight() (int64, error) { + buf, err := store.db.Get(offlineStateSyncHeight) + if err != nil { + return 0, err + } + + if len(buf) == 0 { + return 0, errors.New("value empty") + } + + height := int64FromBytes(buf) + if height < 0 { + return 0, errors.New("invalid value for height: height cannot be negative") + } + return height, nil +} + func (store dbStore) Close() error { return store.db.Close() } @@ -697,12 +767,61 @@ func min(a int64, b int64) int64 { // responseFinalizeBlockFromLegacy is a convenience function that takes the old abci responses and morphs // them into the finalize block response.
Note that the app hash is missing. func responseFinalizeBlockFromLegacy(legacyResp *cmtstate.LegacyABCIResponses) *abci.ResponseFinalizeBlock { - return &abci.ResponseFinalizeBlock{ - TxResults: legacyResp.DeliverTxs, - ValidatorUpdates: legacyResp.EndBlock.ValidatorUpdates, - ConsensusParamUpdates: legacyResp.EndBlock.ConsensusParamUpdates, - Events: append(legacyResp.BeginBlock.Events, legacyResp.EndBlock.Events...), - // NOTE: AppHash is missing in the response but will - // be caught and filled in consensus/replay.go + var response abci.ResponseFinalizeBlock + events := make([]abci.Event, 0) + + if legacyResp.DeliverTxs != nil { + response.TxResults = legacyResp.DeliverTxs + } + + // Check BeginBlock and EndBlock, and only append events or assign values if they are not nil + if legacyResp.BeginBlock != nil { + if legacyResp.BeginBlock.Events != nil { + // Add BeginBlock attribute to BeginBlock events + for idx := range legacyResp.BeginBlock.Events { + legacyResp.BeginBlock.Events[idx].Attributes = append(legacyResp.BeginBlock.Events[idx].Attributes, abci.EventAttribute{ + Key: "mode", + Value: "BeginBlock", + Index: false, + }) + } + events = append(events, legacyResp.BeginBlock.Events...) + } + } + if legacyResp.EndBlock != nil { + if legacyResp.EndBlock.ValidatorUpdates != nil { + response.ValidatorUpdates = legacyResp.EndBlock.ValidatorUpdates + } + if legacyResp.EndBlock.ConsensusParamUpdates != nil { + response.ConsensusParamUpdates = legacyResp.EndBlock.ConsensusParamUpdates + } + if legacyResp.EndBlock.Events != nil { + // Add EndBlock attribute to EndBlock events + for idx := range legacyResp.EndBlock.Events { + legacyResp.EndBlock.Events[idx].Attributes = append(legacyResp.EndBlock.Events[idx].Attributes, abci.EventAttribute{ + Key: "mode", + Value: "EndBlock", + Index: false, + }) + } + events = append(events, legacyResp.EndBlock.Events...) + } } + + response.Events = events + + // NOTE: AppHash is missing in the response but will + // be caught and filled in consensus/replay.go + return &response +} + +func int64FromBytes(bz []byte) int64 { + v, _ := binary.Varint(bz) + return v +} + +func int64ToBytes(i int64) []byte { + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(buf, i) + return buf[:n] } diff --git a/state/store_test.go b/state/store_test.go index a7688fe6b5e..4f60c3e0cd0 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -11,10 +11,8 @@ import ( dbm "github.com/cometbft/cometbft-db" abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/internal/test" - cmtrand "github.com/cometbft/cometbft/libs/rand" cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" @@ -127,7 +125,7 @@ func TestPruneStates(t *testing.T) { // Generate a bunch of state data. Validators change for heights ending with 3, and // parameters when ending with 5.
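The `int64ToBytes`/`int64FromBytes` helpers added above store the offline state-sync height as a signed varint; a self-contained round-trip example:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode as int64ToBytes does: signed varint, trimmed to the written length.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(buf, 12345)

	// Decode as int64FromBytes does.
	v, _ := binary.Varint(buf[:n])
	fmt.Println(v) // 12345
}
```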
- validator := &types.Validator{Address: cmtrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} + validator := &types.Validator{Address: pk.Address(), VotingPower: 100, PubKey: pk} validatorSet := &types.ValidatorSet{ Validators: []*types.Validator{validator}, Proposer: validator, @@ -168,6 +166,7 @@ func TestPruneStates(t *testing.T) { {Data: []byte{2}}, {Data: []byte{3}}, }, + AppHash: make([]byte, 1), }) require.NoError(t, err) } @@ -257,6 +256,7 @@ func TestLastFinalizeBlockResponses(t *testing.T) { TxResults: []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, + AppHash: make([]byte, 1), } // Create a new DB and state store, and set DiscardABCIResponses to false. stateDB = dbm.NewMemDB() @@ -347,6 +347,12 @@ func TestFinalizeBlockRecoveryUsingLegacyABCIResponses(t *testing.T) { resp, err := stateStore.LoadLastFinalizeBlockResponse(height) require.NoError(t, err) require.Equal(t, resp.ConsensusParamUpdates, &cp) - require.Equal(t, resp.Events, legacyResp.LegacyAbciResponses.BeginBlock.Events) + require.Equal(t, len(resp.Events), len(legacyResp.LegacyAbciResponses.BeginBlock.Events)) require.Equal(t, resp.TxResults[0], legacyResp.LegacyAbciResponses.DeliverTxs[0]) } + +func TestIntConversion(t *testing.T) { + x := int64(10) + b := sm.Int64ToBytes(x) + require.Equal(t, x, sm.Int64FromBytes(b)) +} diff --git a/state/tx_filter.go b/state/tx_filter.go index 8be843ee905..337a7a5e7fd 100644 --- a/state/tx_filter.go +++ b/state/tx_filter.go @@ -8,8 +8,12 @@ import ( // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. func TxPreCheck(state State) mempl.PreCheckFunc { + maxBytes := state.ConsensusParams.Block.MaxBytes + if maxBytes == -1 { + maxBytes = int64(types.MaxBlockSizeBytes) + } maxDataBytes := types.MaxDataBytesNoEvidence( - state.ConsensusParams.Block.MaxBytes, + maxBytes, state.Validators.Size(), ) return mempl.PreCheckMaxBytes(maxDataBytes) diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index a70c461c2f6..083e8288e5e 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -4,6 +4,8 @@ import ( "context" "errors" + "github.com/cometbft/cometbft/libs/log" + abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/pubsub/query" ) @@ -26,6 +28,9 @@ type TxIndexer interface { // Search allows you to query for transactions. Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) + + // SetLogger sets a logger for the indexer + SetLogger(l log.Logger) } // Batch groups together multiple Index operations to be performed at the same time.
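Since `SetLogger` is now part of the `TxIndexer` interface, callers are expected to hand the indexer a logger before issuing searches. A minimal wiring sketch (the logger construction is illustrative, not mandated by this change):

```go
package main

import (
	"os"

	dbm "github.com/cometbft/cometbft-db"

	"github.com/cometbft/cometbft/libs/log"
	"github.com/cometbft/cometbft/state/txindex/kv"
)

func main() {
	// Give the kv indexer a logger so height-parsing failures during Search
	// are reported instead of being silently skipped.
	txIndexer := kv.NewTxIndex(dbm.NewMemDB())
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	txIndexer.SetLogger(logger.With("module", "txindex"))
}
```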
diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 17440530428..0e1b25a89d6 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -4,15 +4,20 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" + "math/big" "strconv" "strings" + "github.com/cometbft/cometbft/libs/log" + "github.com/cosmos/gogoproto/proto" dbm "github.com/cometbft/cometbft-db" abci "github.com/cometbft/cometbft/abci/types" + idxutil "github.com/cometbft/cometbft/internal/indexer" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" "github.com/cometbft/cometbft/state/indexer" @@ -21,8 +26,9 @@ import ( ) const ( - tagKeySeparator = "/" - eventSeqSeparator = "$es$" + tagKeySeparator = "/" + tagKeySeparatorRune = '/' + eventSeqSeparator = "$es$" ) var _ txindex.TxIndexer = (*TxIndex)(nil) @@ -32,6 +38,8 @@ type TxIndex struct { store dbm.DB // Number the events in the event list eventSeq int64 + + log log.Logger } // NewTxIndex creates new KV indexer. @@ -41,6 +49,10 @@ func NewTxIndex(store dbm.DB) *TxIndex { } } +func (txi *TxIndex) SetLogger(l log.Logger) { + txi.log = l +} + // Get gets transaction from the TxIndex storage and returns it or nil if the // transaction is not found. func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { @@ -328,9 +340,9 @@ func lookForHash(conditions []syntax.Condition) (hash []byte, ok bool, err error return } -func (txi *TxIndex) setTmpHashes(tmpHeights map[string][]byte, it dbm.Iterator) { - eventSeq := extractEventSeqFromKey(it.Key()) - tmpHeights[string(it.Value())+eventSeq] = it.Value() +func (*TxIndex) setTmpHashes(tmpHeights map[string][]byte, key, value []byte) { + eventSeq := extractEventSeqFromKey(key) + tmpHeights[string(value)+eventSeq] = value } // match returns all matching txs by hash that meet a given condition and start @@ -367,12 +379,21 @@ func (txi *TxIndex) match( // If we have a height range in a query, we need only transactions // for this height - keyHeight, err := extractHeightFromKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + key := it.Key() + keyHeight, err := extractHeightFromKey(key) + if err != nil { + txi.log.Error("failure to parse height from key:", err) continue } - - txi.setTmpHashes(tmpHashes, it) + withinBounds, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + txi.log.Error("failure checking for height bounds:", err) + continue + } + if !withinBounds { + continue + } + txi.setTmpHashes(tmpHashes, key, it.Value()) // Potentially exit early. select { case <-ctx.Done(): @@ -395,11 +416,21 @@ func (txi *TxIndex) match( EXISTS_LOOP: for ; it.Valid(); it.Next() { - keyHeight, err := extractHeightFromKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + key := it.Key() + keyHeight, err := extractHeightFromKey(key) + if err != nil { + txi.log.Error("failure to parse height from key:", err) continue } - txi.setTmpHashes(tmpHashes, it) + withinBounds, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + txi.log.Error("failure checking for height bounds:", err) + continue + } + if !withinBounds { + continue + } + txi.setTmpHashes(tmpHashes, key, it.Value()) // Potentially exit early. 
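The search loops above repeat the same parse-and-filter steps; condensed as a hypothetical in-package helper (`matchesHeight` is not part of this change):

```go
// matchesHeight condenses the repeated filtering steps: parse the height from
// the key, log and skip entries that fail to parse or whose bounds check
// errors, and report whether the entry is within the queried height range.
func (txi *TxIndex) matchesHeight(heightInfo HeightInfo, key []byte) bool {
	keyHeight, err := extractHeightFromKey(key)
	if err != nil {
		txi.log.Error("failure to parse height from key:", err)
		return false
	}
	withinBounds, err := checkHeightConditions(heightInfo, keyHeight)
	if err != nil {
		txi.log.Error("failure checking for height bounds:", err)
		return false
	}
	return withinBounds
}
```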
select { @@ -429,11 +460,21 @@ func (txi *TxIndex) match( } if strings.Contains(extractValueFromKey(it.Key()), c.Arg.Value()) { - keyHeight, err := extractHeightFromKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + key := it.Key() + keyHeight, err := extractHeightFromKey(key) + if err != nil { + txi.log.Error("failure to parse height from key:", err) + continue + } + withinBounds, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + txi.log.Error("failure checking for height bounds:", err) + continue + } + if !withinBounds { continue } - txi.setTmpHashes(tmpHashes, it) + txi.setTmpHashes(tmpHashes, key, it.Value()) } // Potentially exit early. @@ -507,27 +548,55 @@ func (txi *TxIndex) matchRange( panic(err) } defer it.Close() + bigIntValue := new(big.Int) LOOP: for ; it.Valid(); it.Next() { - if !isTagKey(it.Key()) { + // TODO: We need to make a function for getting it.Key() as a byte slice with no copies. + // It currently copies the source data (which can change on a subsequent .Next() call) but that + // is not an issue for us. + key := it.Key() + if !isTagKey(key) { continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - if err != nil { - continue LOOP - } - if qr.Key != types.TxHeightKey { - keyHeight, err := extractHeightFromKey(it.Key()) - if err != nil || !checkHeightConditions(heightInfo, keyHeight) { + if _, ok := qr.AnyBound().(*big.Float); ok { + value := extractValueFromKey(key) + v, ok := bigIntValue.SetString(value, 10) + var vF *big.Float + if !ok { + vF, _, err = big.ParseFloat(value, 10, 125, big.ToNearestEven) + if err != nil { continue LOOP } } - if checkBounds(qr, v) { - txi.setTmpHashes(tmpHashes, it) + if qr.Key != types.TxHeightKey { + keyHeight, err := extractHeightFromKey(key) + if err != nil { + txi.log.Error("failure to parse height from key:", err) + continue + } + withinBounds, err := checkHeightConditions(heightInfo, keyHeight) + if err != nil { + txi.log.Error("failure checking for height bounds:", err) + continue + } + if !withinBounds { + continue + } + } + var withinBounds bool + var err error + if !ok { + withinBounds, err = idxutil.CheckBounds(qr, vF) + } else { + withinBounds, err = idxutil.CheckBounds(qr, v) + } + if err != nil { + txi.log.Error("failed to parse bounds:", err) + } else if withinBounds { + txi.setTmpHashes(tmpHashes, key, it.Value()) } // XXX: passing time in a ABCI Events is not yet implemented @@ -587,29 +656,62 @@ func isTagKey(key []byte) bool { // tags should 4. Alternatively it should be 3 if the event was not indexed // with the corresponding event sequence. However, some attribute values in // production can contain the tag separator. Therefore, the condition is >= 3. - numTags := strings.Count(string(key), tagKeySeparator) - return numTags >= 3 + numTags := 0 + for i := 0; i < len(key); i++ { + if key[i] == tagKeySeparatorRune { + numTags++ + if numTags >= 3 { + return true + } + } + } + return false } func extractHeightFromKey(key []byte) (int64, error) { - parts := strings.SplitN(string(key), tagKeySeparator, -1) + // the height is the second last element in the key. 
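`matchRange` above first tries an attribute value as a `big.Int` and falls back to `big.ParseFloat` only when the string has a fractional part, so values beyond the int64 range still match range queries. A standalone sketch of that fallback:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	value := "10000000000000000000.76" // beyond int64, with a fractional part

	// First attempt: arbitrary-precision integer, as in matchRange.
	if v, ok := new(big.Int).SetString(value, 10); ok {
		fmt.Println("parsed as big.Int:", v)
		return
	}

	// Fallback: base-10 float with 125 bits of precision, matching matchRange.
	f, _, err := big.ParseFloat(value, 10, 125, big.ToNearestEven)
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed as big.Float:", f) // ~1e19
}
```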
+ // Find the position of the last occurrence of tagKeySeparator + endPos := bytes.LastIndexByte(key, tagKeySeparatorRune) + if endPos == -1 { + return 0, errors.New("separator not found") + } + + // Find the position of the second last occurrence of tagKeySeparator + startPos := bytes.LastIndexByte(key[:endPos-1], tagKeySeparatorRune) + if startPos == -1 { + return 0, errors.New("second last separator not found") + } - return strconv.ParseInt(parts[len(parts)-2], 10, 64) + // Extract the height part of the key + height, err := strconv.ParseInt(string(key[startPos+1:endPos]), 10, 64) + if err != nil { + return 0, err + } + return height, nil } -func extractValueFromKey(key []byte) string { - keyString := string(key) - parts := strings.SplitN(keyString, tagKeySeparator, -1) - partsLen := len(parts) - value := strings.TrimPrefix(keyString, parts[0]+tagKeySeparator) - suffix := "" - suffixLen := 2 +func extractValueFromKey(key []byte) string { + // Find the positions of tagKeySeparator in the byte slice + var indices []int + for i, b := range key { + if b == tagKeySeparatorRune { + indices = append(indices, i) + } + } - for i := 1; i <= suffixLen; i++ { - suffix = tagKeySeparator + parts[partsLen-i] + suffix + // If there are less than 2 occurrences of tagKeySeparator, return an empty string + if len(indices) < 2 { + return "" } - return strings.TrimSuffix(value, suffix) + // Extract the value between the first and second last occurrence of tagKeySeparator + value := key[indices[0]+1 : indices[len(indices)-2]] + + // Trim any leading or trailing whitespace + value = bytes.TrimSpace(value) + + // TODO: Do an unsafe cast to avoid an extra allocation here + return string(value) } func extractEventSeqFromKey(key []byte) string { @@ -622,6 +724,7 @@ func extractEventSeqFromKey(key []byte) string { } return "0" } + func keyForEvent(key string, value string, result *abci.TxResult, eventSeq int64) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d%s", key, @@ -658,18 +761,3 @@ func startKey(fields ...interface{}) []byte { } return b.Bytes() } - -func checkBounds(ranges indexer.QueryRange, v int64) bool { - include := true - lowerBound := ranges.LowerBoundValue() - upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { - include = false - } - - if upperBound != nil && v > upperBound.(int64) { - include = false - } - - return include -} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 899a099892e..ea8d4ea308b 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -155,7 +155,6 @@ func TestTxSearch(t *testing.T) { } func TestTxSearchEventMatch(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ @@ -212,23 +211,19 @@ func TestTxSearchEventMatch(t *testing.T) { q: "tx.height < 2 AND account.number = 3 AND account.number = 2 AND account.number = 5", resultsLength: 0, }, - "Deduplication test - should return nothing if attribute repeats multiple times with match events": { - q: "tx.height < 2 AND account.number = 3 AND account.number = 2 AND account.number = 5", - resultsLength: 0, - }, - " Match range with match events": { + " Match range with special character": { q: "account.number < 2 AND account.owner = '/Ivan/.test'", resultsLength: 0, }, - " Match range with match events 2": { + " Match range with special character 2": { q: "account.number <= 2 AND account.owner = '/Ivan/.test' AND tx.height > 0", resultsLength: 1, }, - " Match range with match events contains with 
multiple items": { + " Match range with contains with multiple items": { q: "account.number <= 2 AND account.owner CONTAINS '/Iv' AND account.owner CONTAINS 'an' AND tx.height = 1", resultsLength: 1, }, - " Match range with match events contains": { + " Match range with contains": { q: "account.number <= 2 AND account.owner CONTAINS 'an' AND tx.height > 0", resultsLength: 1, }, @@ -251,6 +246,89 @@ func TestTxSearchEventMatch(t *testing.T) { }) } } + +func TestTxSearchEventMatchByHeight(t *testing.T) { + + indexer := NewTxIndex(db.NewMemDB()) + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}, {Key: "owner", Value: "Ana", Index: true}}}, + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + txResult10 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}, {Key: "owner", Value: "/Ivan/.test", Index: true}}}, + }) + txResult10.Tx = types.Tx("HELLO WORLD 10") + txResult10.Height = 10 + + err = indexer.Index(txResult10) + require.NoError(t, err) + + testCases := map[string]struct { + q string + resultsLength int + }{ + "Return all events from a height 1": { + q: "tx.height = 1", + resultsLength: 1, + }, + "Return all events from a height 10": { + q: "tx.height = 10", + resultsLength: 1, + }, + "Return all events from a height 5": { + q: "tx.height = 5", + resultsLength: 0, + }, + "Return all events from a height in [2; 5]": { + q: "tx.height >= 2 AND tx.height <= 5", + resultsLength: 0, + }, + "Return all events from a height in [1; 5]": { + q: "tx.height >= 1 AND tx.height <= 5", + resultsLength: 1, + }, + "Return all events from a height in [1; 10]": { + q: "tx.height >= 1 AND tx.height <= 10", + resultsLength: 2, + }, + "Return all events from a height in [1; 5] by account.number": { + q: "tx.height >= 1 AND tx.height <= 5 AND account.number=1", + resultsLength: 1, + }, + "Return all events from a height in [1; 10] by account.number 2": { + q: "tx.height >= 1 AND tx.height <= 10 AND account.number=1", + resultsLength: 2, + }, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustCompile(tc.q)) + assert.NoError(t, err) + + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 { + for _, txr := range results { + if txr.Height == 1 { + assert.True(t, proto.Equal(txResult, txr)) + } else if txr.Height == 10 { + assert.True(t, proto.Equal(txResult10, txr)) + } else { + assert.True(t, false) + } + } + } + }) + } +} + func TestTxSearchWithCancelation(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) @@ -361,6 +439,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { require.NoError(t, err) testCases := []struct { + name string q string found bool }{ @@ -421,11 +500,11 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { for _, tc := range testCases { results, err := indexer.Search(ctx, query.MustCompile(tc.q)) assert.NoError(t, err) - len := 0 + n := 0 if tc.found { - len = 1 + n = 1 } - assert.Len(t, results, len) + assert.Len(t, results, n) assert.True(t, !tc.found || proto.Equal(txResult, results[0])) } @@ -637,6 +716,84 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { } } +func TestBigInt(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + bigInt := "10000000000000000000" + bigIntPlus1 := "10000000000000000001" + 
bigFloat := bigInt + ".76" + bigFloatLower := bigInt + ".1" + bigFloatSmaller := "9999999999999999999" + ".1" + bigIntSmaller := "9999999999999999999" + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigInt, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloatSmaller, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigIntPlus1, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloatLower, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "owner", Value: "/Ivan/", Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}}, + }) + hash := types.Tx(txResult.Tx).Hash() + + err := indexer.Index(txResult) + + require.NoError(t, err) + + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}, {Key: "amount", Value: "5", Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigIntSmaller, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigInt, Index: true}, {Key: "amount", Value: "3", Index: true}}}}) + + txResult2.Tx = types.Tx("NEW TX") + txResult2.Height = 2 + txResult2.Index = 2 + + hash2 := types.Tx(txResult2.Tx).Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + testCases := []struct { + q string + txRes *abci.TxResult + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), txResult, 1}, + // search by hash (lower) + {fmt.Sprintf("tx.hash = '%x'", hash), txResult, 1}, + {fmt.Sprintf("tx.hash = '%x'", hash2), txResult2, 1}, + // search by exact match (one key) - bigint + {"account.number >= " + bigInt, nil, 2}, + // search by exact match (one key) - bigint range + {"account.number >= " + bigInt + " AND tx.height > 0", nil, 2}, + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.owner = '/Ivan/'", nil, 0}, + // Floats are not parsed + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.amount > 4", txResult2, 1}, + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.amount = 5", txResult2, 1}, + {"account.number >= " + bigInt + " AND account.amount <= 5", txResult2, 1}, + {"account.number > " + bigFloatSmaller + " AND account.amount = 3", txResult2, 1}, + {"account.number < " + bigInt + " AND tx.height >= 1", nil, 2}, + {"account.number < " + bigInt + " AND tx.height = 1", nil, 1}, + {"account.number < " + bigInt + " AND tx.height = 2", nil, 1}, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustCompile(tc.q)) + assert.NoError(t, err) + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 && tc.txRes != nil { + assert.True(t, proto.Equal(results[0], tc.txRes)) + } + }) + } +} + func BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) } func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) } func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) } diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index 3f00d342be4..753b64e4d2a 100644 --- a/state/txindex/kv/utils.go +++ 
b/state/txindex/kv/utils.go @@ -2,7 +2,9 @@ package kv import ( "fmt" + "math/big" + idxutil "github.com/cometbft/cometbft/internal/indexer" cmtsyntax "github.com/cometbft/cometbft/libs/pubsub/query/syntax" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/types" @@ -45,6 +47,7 @@ func ParseEventSeqFromEventKey(key []byte) (int64, error) { return eventSeq, nil } + func dedupHeight(conditions []cmtsyntax.Condition) (dedupConditions []cmtsyntax.Condition, heightInfo HeightInfo) { heightInfo.heightEqIdx = -1 heightRangeExists := false @@ -57,10 +60,13 @@ func dedupHeight(conditions []cmtsyntax.Condition) (dedupConditions []cmtsyntax. if c.Op == cmtsyntax.TEq { if heightRangeExists || found { continue - } else { + } + hFloat := c.Arg.Number() + if hFloat != nil { + h, _ := hFloat.Int64() + heightInfo.height = h found = true heightCondition = append(heightCondition, c) - heightInfo.height = int64(c.Arg.Number()) } } else { heightInfo.onlyHeightEq = false @@ -87,15 +93,16 @@ func dedupHeight(conditions []cmtsyntax.Condition) (dedupConditions []cmtsyntax. return dedupConditions, heightInfo } -func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { +func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) (bool, error) { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { - return false + withinBounds, err := idxutil.CheckBounds(heightInfo.heightRange, big.NewInt(keyHeight)) + if err != nil || !withinBounds { + return false, err } } else { if heightInfo.height != 0 && keyHeight != heightInfo.height { - return false + return false, nil } } - return true + return true, nil } diff --git a/state/txindex/mocks/tx_indexer.go b/state/txindex/mocks/tx_indexer.go index fb50fd96dd9..6aeed28c6f6 100644 --- a/state/txindex/mocks/tx_indexer.go +++ b/state/txindex/mocks/tx_indexer.go @@ -5,9 +5,11 @@ package mocks import ( context "context" - query "github.com/cometbft/cometbft/libs/pubsub/query" + log "github.com/cometbft/cometbft/libs/log" mock "github.com/stretchr/testify/mock" + query "github.com/cometbft/cometbft/libs/pubsub/query" + txindex "github.com/cometbft/cometbft/state/txindex" types "github.com/cometbft/cometbft/abci/types" @@ -22,6 +24,10 @@ type TxIndexer struct { func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { ret := _m.Called(b) + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + var r0 error if rf, ok := ret.Get(0).(func(*txindex.Batch) error); ok { r0 = rf(b) @@ -36,6 +42,10 @@ func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *types.TxResult var r1 error if rf, ok := ret.Get(0).(func([]byte) (*types.TxResult, error)); ok { @@ -62,6 +72,10 @@ func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { func (_m *TxIndexer) Index(result *types.TxResult) error { ret := _m.Called(result) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 error if rf, ok := ret.Get(0).(func(*types.TxResult) error); ok { r0 = rf(result) @@ -76,6 +90,10 @@ func (_m *TxIndexer) Index(result *types.TxResult) error { func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxResult, error) { ret := _m.Called(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Search") + } + var r0 []*types.TxResult var r1 error if rf, ok := 
ret.Get(0).(func(context.Context, *query.Query) ([]*types.TxResult, error)); ok { @@ -98,13 +116,17 @@ func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxRes return r0, r1 } -type mockConstructorTestingTNewTxIndexer interface { - mock.TestingT - Cleanup(func()) +// SetLogger provides a mock function with given fields: l +func (_m *TxIndexer) SetLogger(l log.Logger) { + _m.Called(l) } // NewTxIndexer creates a new instance of TxIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTxIndexer(t mockConstructorTestingTNewTxIndexer) *TxIndexer { +// The first argument is typically a *testing.T value. +func NewTxIndexer(t interface { + mock.TestingT + Cleanup(func()) +}) *TxIndexer { mock := &TxIndexer{} mock.Mock.Test(t) diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 3e881e826fa..49338154c39 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -4,6 +4,8 @@ import ( "context" "errors" + "github.com/cometbft/cometbft/libs/log" + abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/txindex" @@ -15,20 +17,24 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) type TxIndex struct{} // Get on a TxIndex is disabled and panics when invoked. -func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { +func (txi *TxIndex) Get(_ []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } // AddBatch is a noop and always returns nil. -func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { +func (txi *TxIndex) AddBatch(_ *txindex.Batch) error { return nil } // Index is a noop and always returns nil. 
-func (txi *TxIndex) Index(result *abci.TxResult) error { +func (txi *TxIndex) Index(_ *abci.TxResult) error { return nil } -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (txi *TxIndex) Search(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { return []*abci.TxResult{}, nil } + +func (txi *TxIndex) SetLogger(log.Logger) { + +} diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go index cbbe3c0b5a5..d45d5ce01f1 100644 --- a/statesync/mocks/state_provider.go +++ b/statesync/mocks/state_provider.go @@ -20,6 +20,10 @@ type StateProvider struct { func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for AppHash") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { @@ -46,6 +50,10 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, er func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.Commit var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*types.Commit, error)); ok { @@ -72,6 +80,10 @@ func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Comm func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for State") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.State, error)); ok { @@ -92,13 +104,12 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } -type mockConstructorTestingTNewStateProvider interface { +// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateProvider(t interface { mock.TestingT Cleanup(func()) -} - -// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider { +}) *StateProvider { mock := &StateProvider{} mock.Mock.Test(t) diff --git a/statesync/reactor.go b/statesync/reactor.go index dfc911be722..a7374a29182 100644 --- a/statesync/reactor.go +++ b/statesync/reactor.go @@ -47,10 +47,8 @@ func NewReactor( cfg config.StateSyncConfig, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - tempDir string, metrics *Metrics, ) *Reactor { - r := &Reactor{ cfg: cfg, conn: conn, @@ -97,7 +95,7 @@ func (r *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements p2p.Reactor. 
-func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (r *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { r.mtx.RLock() defer r.mtx.RUnlock() if r.syncer != nil { diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go index e5678111d61..a057cb69781 100644 --- a/statesync/reactor_test.go +++ b/statesync/reactor_test.go @@ -26,11 +26,13 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { "chunk is returned": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{1, 2, 3}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}, + }, "empty chunk is returned, as nil": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}, + }, "nil (missing) chunk is returned as missing": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, nil, @@ -71,7 +73,7 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { // Start a reactor and send a ssproto.ChunkRequest, then wait for and check response cfg := config.DefaultStateSyncConfig() - r := NewReactor(*cfg, conn, nil, "", NopMetrics()) + r := NewReactor(*cfg, conn, nil, NopMetrics()) err := r.Start() require.NoError(t, err) t.Cleanup(func() { @@ -161,7 +163,7 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { // Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses cfg := config.DefaultStateSyncConfig() - r := NewReactor(*cfg, conn, nil, "", NopMetrics()) + r := NewReactor(*cfg, conn, nil, NopMetrics()) err := r.Start() require.NoError(t, err) t.Cleanup(func() { diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index 7abef23fd36..4fbb47a2e13 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -29,7 +29,7 @@ import ( const testAppVersion = 9 // Sets up a basic syncer that can be used to test OfferSnapshot requests -func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { +func setupOfferSyncer() (*syncer, *proxymocks.AppConnSnapshot) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} @@ -124,17 +124,17 @@ func TestSyncer_SyncAny(t *testing.T) { // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in // format 2, which will be rejected by the ABCI application. - new, err := syncer.AddSnapshot(peerA, s) + isNew, err := syncer.AddSnapshot(peerA, s) require.NoError(t, err) - assert.True(t, new) + assert.True(t, isNew) - new, err = syncer.AddSnapshot(peerB, s) + isNew, err = syncer.AddSnapshot(peerB, s) require.NoError(t, err) - assert.False(t, new) + assert.False(t, isNew) - new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) + isNew, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) require.NoError(t, err) - assert.True(t, new) + assert.True(t, isNew) // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. 
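The regenerated mocks in this change (here and under `state/txindex/mocks`) panic when a method is called without a stubbed return value, so tests must declare expectations up front. A hypothetical usage sketch:

```go
package statesync

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/cometbft/cometbft/statesync/mocks"
)

// Hypothetical test: with the regenerated mocks, any call without a stubbed
// return value panics with "no return value specified for ...".
func TestStateProviderStub(t *testing.T) {
	provider := mocks.NewStateProvider(t) // registers cleanup assertions on t
	provider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil)

	hash, err := provider.AppHash(context.Background(), 1)
	require.NoError(t, err)
	require.Equal(t, []byte("app_hash"), hash)
}
```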
@@ -232,13 +232,13 @@ func TestSyncer_SyncAny(t *testing.T) { } func TestSyncer_SyncAny_noSnapshots(t *testing.T) { - syncer, _ := setupOfferSyncer(t) + syncer, _ := setupOfferSyncer() _, _, err := syncer.SyncAny(0, func() {}) assert.Equal(t, errNoSnapshots, err) } func TestSyncer_SyncAny_abort(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} _, err := syncer.AddSnapshot(simplePeer("id"), s) @@ -253,7 +253,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { } func TestSyncer_SyncAny_reject(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -284,7 +284,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) { } func TestSyncer_SyncAny_reject_format(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() // s22 is tried first, which rejects s22 and s12; then s11 will abort. s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -311,7 +311,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { } func TestSyncer_SyncAny_reject_sender(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() peerA := simplePeer("a") peerB := simplePeer("b") @@ -349,7 +349,7 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { } func TestSyncer_SyncAny_abciError(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -385,7 +385,7 @@ func TestSyncer_offerSnapshot(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s), @@ -447,7 +447,8 @@ func TestSyncer_applyChunks_Results(t *testing.T) { connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }, nil) } err = syncer.applyChunks(chunks) diff --git a/store/bench_test.go b/store/bench_test.go new file mode 100644 index 00000000000..ceb7a94e50e --- /dev/null +++ b/store/bench_test.go @@ -0,0 +1,35 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/internal/test" + "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" +) + +// BenchmarkRepeatedLoadSeenCommitSameBlock measures repeated LoadSeenCommit calls +// for the same previously saved block, exercising the block store's seen-commit +// cache on the hot path.
+func BenchmarkRepeatedLoadSeenCommitSameBlock(b *testing.B) { + state, bs, cleanup := makeStateAndBlockStore() + defer cleanup() + h := bs.Height() + 1 + block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + seenCommit := makeTestExtCommitWithNumSigs(block.Header.Height, cmttime.Now(), 100).ToCommit() + ps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(b, err) + bs.SaveBlock(block, ps, seenCommit) + + // sanity check + res := bs.LoadSeenCommit(block.Height) + require.Equal(b, seenCommit, res) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := bs.LoadSeenCommit(block.Height) + require.NotNil(b, res) + } +} diff --git a/store/store.go b/store/store.go index a82f947609d..baffc704d10 100644 --- a/store/store.go +++ b/store/store.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/cosmos/gogoproto/proto" + lru "github.com/hashicorp/golang-lru/v2" dbm "github.com/cometbft/cometbft-db" @@ -17,6 +18,12 @@ import ( "github.com/cometbft/cometbft/types" ) +// Assuming the length of a block part is 64kB (`types.BlockPartSizeBytes`), +// the maximum size of a block, that will be batch saved, is 640kB. The +// benchmarks have shown that `goleveldb` still performs well with blocks of +// this size. However, if the block is larger than 1MB, the performance degrades. +const maxBlockPartsToBatch = 10 + /* BlockStore is a simple low level store for blocks. @@ -37,25 +44,58 @@ The store can be assumed to contain all contiguous blocks between base and heigh type BlockStore struct { db dbm.DB - // mtx guards access to the struct fields listed below it. We rely on the database to enforce - // fine-grained concurrency control for its data, and thus this mutex does not apply to - // database contents. The only reason for keeping these fields in the struct is that the data + // mtx guards access to the struct fields listed below it. Although we rely on the database + // to enforce fine-grained concurrency control for its data, we need to make sure that + // no external observer can get data from the database that is not in sync with the fields below, + // and vice-versa. Hence, when updating the fields below, we use the mutex to make sure + // that the database is also up to date. This prevents any concurrent external access from + // obtaining inconsistent data. + // The only reason for keeping these fields in the struct is that the data // can't efficiently be queried from the database since the key encoding we use is not // lexicographically ordered (see https://github.com/tendermint/tendermint/issues/4567). mtx cmtsync.RWMutex base int64 height int64 + + seenCommitCache *lru.Cache[int64, *types.Commit] + blockCommitCache *lru.Cache[int64, *types.Commit] + blockExtendedCommitCache *lru.Cache[int64, *types.ExtendedCommit] } // NewBlockStore returns a new BlockStore with the given DB, // initialized to the last height that was committed to the DB. func NewBlockStore(db dbm.DB) *BlockStore { bs := LoadBlockStoreState(db) - return &BlockStore{ + bStore := &BlockStore{ base: bs.Base, height: bs.Height, db: db, } + bStore.addCaches() + return bStore +} + +func (bs *BlockStore) addCaches() { + var err error + // err can only occur if the argument is non-positive, so is impossible in context. 
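The commit caches rely on the generics API of `hashicorp/golang-lru/v2`; a minimal standalone sketch of the Get/Add flow the `Load*Commit` methods follow (the store additionally returns `Clone()`s so callers cannot mutate cached values):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// Same constructor the block store uses: a fixed-size LRU keyed by height.
	cache, err := lru.New[int64, string](100)
	if err != nil { // only possible for a non-positive size
		panic(err)
	}

	if v, ok := cache.Get(1); ok {
		fmt.Println("cache hit:", v) // the store would return a Clone() here
	}
	cache.Add(1, "commit@1") // populate on a miss, as the Load* methods do
}
```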
+ bs.blockCommitCache, err = lru.New[int64, *types.Commit](100) + if err != nil { + panic(err) + } + bs.blockExtendedCommitCache, err = lru.New[int64, *types.ExtendedCommit](100) + if err != nil { + panic(err) + } + bs.seenCommitCache, err = lru.New[int64, *types.Commit](100) + if err != nil { + panic(err) + } +} + +func (bs *BlockStore) IsEmpty() bool { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + return bs.base == bs.height && bs.base == 0 } // Base returns the first known contiguous block height, or 0 for empty block stores. @@ -190,7 +230,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { panic(fmt.Errorf("unmarshal to cmtproto.BlockMeta: %w", err)) } - blockMeta, err := types.BlockMetaFromProto(pbbm) + blockMeta, err := types.BlockMetaFromTrustedProto(pbbm) if err != nil { panic(fmt.Errorf("error from proto blockMeta: %w", err)) } @@ -222,6 +262,10 @@ func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { + comm, ok := bs.blockCommitCache.Get(height) + if ok { + return comm.Clone() + } pbc := new(cmtproto.Commit) bz, err := bs.db.Get(calcBlockCommitKey(height)) if err != nil { @@ -238,13 +282,18 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { if err != nil { panic(fmt.Errorf("converting commit to proto: %w", err)) } - return commit + bs.blockCommitCache.Add(height, commit) + return commit.Clone() } // LoadExtendedCommit returns the ExtendedCommit for the given height. // The extended commit is not guaranteed to contain the same +2/3 precommits data // as the commit in the block. func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { + comm, ok := bs.blockExtendedCommitCache.Get(height) + if ok { + return comm.Clone() + } pbec := new(cmtproto.ExtendedCommit) bz, err := bs.db.Get(calcExtCommitKey(height)) if err != nil { @@ -261,13 +310,18 @@ func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommi if err != nil { panic(fmt.Errorf("converting extended commit: %w", err)) } - return extCommit + bs.blockExtendedCommitCache.Add(height, extCommit) + return extCommit.Clone() } // LoadSeenCommit returns the locally seen Commit for the given height. // This is useful when we've seen a commit, but there has not yet been // a new block at `height + 1` that includes this commit in its block.LastCommit. func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { + comm, ok := bs.seenCommitCache.Get(height) + if ok { + return comm.Clone() + } pbc := new(cmtproto.Commit) bz, err := bs.db.Get(calcSeenCommitKey(height)) if err != nil { @@ -285,7 +339,8 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { if err != nil { panic(fmt.Errorf("converting seen commit: %w", err)) } - return commit + bs.seenCommitCache.Add(height, commit) + return commit.Clone() } // PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned and the evidence retain height - the height at which data needed to prove evidence must not be removed. @@ -312,16 +367,10 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, // We can't trust batches to be atomic, so update base first to make sure noone // tries to access missing blocks. 
bs.mtx.Lock() + defer batch.Close() + defer bs.mtx.Unlock() bs.base = base - bs.mtx.Unlock() - bs.saveState() - - err := batch.WriteSync() - if err != nil { - return fmt.Errorf("failed to prune up to height %v: %w", base, err) - } - batch.Close() - return nil + return bs.saveStateAndWriteDB(batch, "failed to prune") } evidencePoint := height @@ -393,12 +442,26 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s if block == nil { panic("BlockStore can only save a non-nil block") } - if err := bs.saveBlockToBatch(block, blockParts, seenCommit); err != nil { + + batch := bs.db.NewBatch() + defer batch.Close() + + if err := bs.saveBlockToBatch(block, blockParts, seenCommit, batch); err != nil { panic(err) } + bs.mtx.Lock() + defer bs.mtx.Unlock() + bs.height = block.Height + if bs.base == 0 { + bs.base = block.Height + } + // Save new BlockStoreState descriptor. This also flushes the database. - bs.saveState() + err := bs.saveStateAndWriteDB(batch, "failed to save block") + if err != nil { + panic(err) + } } // SaveBlockWithExtendedCommit persists the given block, blockParts, and @@ -413,22 +476,41 @@ func (bs *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts if err := seenExtendedCommit.EnsureExtensions(true); err != nil { panic(fmt.Errorf("problems saving block with extensions: %w", err)) } - if err := bs.saveBlockToBatch(block, blockParts, seenExtendedCommit.ToCommit()); err != nil { + + batch := bs.db.NewBatch() + defer batch.Close() + + if err := bs.saveBlockToBatch(block, blockParts, seenExtendedCommit.ToCommit(), batch); err != nil { panic(err) } height := block.Height pbec := seenExtendedCommit.ToProto() extCommitBytes := mustEncode(pbec) - if err := bs.db.Set(calcExtCommitKey(height), extCommitBytes); err != nil { + if err := batch.Set(calcExtCommitKey(height), extCommitBytes); err != nil { panic(err) } + bs.mtx.Lock() + defer bs.mtx.Unlock() + bs.height = height + if bs.base == 0 { + bs.base = height + } + // Save new BlockStoreState descriptor. This also flushes the database. - bs.saveState() + err := bs.saveStateAndWriteDB(batch, "failed to save block with extended commit") + if err != nil { + panic(err) + } } -func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) error { +func (bs *BlockStore) saveBlockToBatch( + block *types.Block, + blockParts *types.PartSet, + seenCommit *types.Commit, + batch dbm.Batch, +) error { if block == nil { panic("BlockStore can only save a non-nil block") } @@ -446,13 +528,17 @@ func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.Par return fmt.Errorf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", height, seenCommit.Height) } + // If the block is small, batch save the block parts. Otherwise, save the + // parts individually. + saveBlockPartsToBatch := blockParts.Count() <= maxBlockPartsToBatch + // Save block parts. This must be done before the block meta, since callers // typically load the block meta first as an indication that the block exists // and then go on to load block parts - we must make sure the block is // complete as soon as the block meta is written. 
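The size-based branching above (`saveBlockPartsToBatch`) comes from simple arithmetic on the constants involved; a sketch, assuming `types.BlockPartSizeBytes` of 64 kB:

```go
package main

import "fmt"

func main() {
	const blockPartSizeBytes = 65536 // types.BlockPartSizeBytes (64 kB)
	const maxBlockPartsToBatch = 10  // threshold used by saveBlockToBatch

	// Largest block still written through the batch: 640 kB, comfortably
	// below the ~1 MB region where goleveldb batch performance degrades.
	fmt.Println(blockPartSizeBytes*maxBlockPartsToBatch, "bytes") // 655360 bytes
}
```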
for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - bs.saveBlockPart(height, i, part) + bs.saveBlockPart(height, i, part, batch, saveBlockPartsToBatch) } // Save block meta @@ -462,17 +548,17 @@ func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.Par return errors.New("nil blockmeta") } metaBytes := mustEncode(pbm) - if err := bs.db.Set(calcBlockMetaKey(height), metaBytes); err != nil { + if err := batch.Set(calcBlockMetaKey(height), metaBytes); err != nil { return err } - if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { + if err := batch.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { return err } // Save block commit (duplicate and separate from the Block) pbc := block.LastCommit.ToProto() blockCommitBytes := mustEncode(pbc) - if err := bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { + if err := batch.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { return err } @@ -480,40 +566,43 @@ func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.Par // NOTE: we can delete this at a later height pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) - if err := bs.db.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil { + if err := batch.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil { return err } - // Done! - bs.mtx.Lock() - bs.height = height - if bs.base == 0 { - bs.base = height - } - bs.mtx.Unlock() - return nil } -func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { +func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch, saveBlockPartsToBatch bool) { pbp, err := part.ToProto() if err != nil { panic(fmt.Errorf("unable to make part into proto: %w", err)) } partBytes := mustEncode(pbp) - if err := bs.db.Set(calcBlockPartKey(height, index), partBytes); err != nil { + if saveBlockPartsToBatch { + err = batch.Set(calcBlockPartKey(height, index), partBytes) + } else { + err = bs.db.Set(calcBlockPartKey(height, index), partBytes) + } + if err != nil { panic(err) } } -func (bs *BlockStore) saveState() { - bs.mtx.RLock() +// Contract: the caller MUST have, at least, a read lock on `bs`. +func (bs *BlockStore) saveStateAndWriteDB(batch dbm.Batch, errMsg string) error { bss := cmtstore.BlockStoreState{ Base: bs.base, Height: bs.height, } - bs.mtx.RUnlock() - SaveBlockStoreState(&bss, bs.db) + SaveBlockStoreStateBatch(&bss, batch) + + err := batch.WriteSync() + if err != nil { + return fmt.Errorf("error writing batch to DB %q: (base %d, height %d): %w", + errMsg, bs.base, bs.height, err) + } + return nil } // SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping a node. @@ -561,12 +650,31 @@ func calcBlockHashKey(hash []byte) []byte { var blockStoreKey = []byte("blockStore") // SaveBlockStoreState persists the blockStore state to the database. +// Deprecated: use SaveBlockStoreStateBatch instead; still present in this version for API compatibility func SaveBlockStoreState(bsj *cmtstore.BlockStoreState, db dbm.DB) { + saveBlockStoreStateBatchInternal(bsj, db, nil) +} + +// SaveBlockStoreStateBatch persists the blockStore state to the database.
+// It uses the DB batch passed as parameter +func SaveBlockStoreStateBatch(bsj *cmtstore.BlockStoreState, batch dbm.Batch) { + saveBlockStoreStateBatchInternal(bsj, nil, batch) +} + +func saveBlockStoreStateBatchInternal(bsj *cmtstore.BlockStoreState, db dbm.DB, batch dbm.Batch) { bytes, err := proto.Marshal(bsj) if err != nil { - panic(fmt.Sprintf("Could not marshal state bytes: %v", err)) + panic(fmt.Sprintf("could not marshal state bytes: %v", err)) + } + if batch != nil { + err = batch.Set(blockStoreKey, bytes) + } else { + if db == nil { + panic("both 'db' and 'batch' cannot be nil") + } + err = db.SetSync(blockStoreKey, bytes) } - if err := db.SetSync(blockStoreKey, bytes); err != nil { + if err != nil { panic(err) } } @@ -643,13 +751,7 @@ func (bs *BlockStore) DeleteLatestBlock() error { } bs.mtx.Lock() + defer bs.mtx.Unlock() bs.height = targetHeight - 1 - bs.mtx.Unlock() - bs.saveState() - - err := batch.WriteSync() - if err != nil { - return fmt.Errorf("failed to delete height %v: %w", targetHeight, err) - } - return nil + return bs.saveStateAndWriteDB(batch, "failed to delete the latest block") } diff --git a/store/store_test.go b/store/store_test.go index ca56a7bd036..cafe7d34ffa 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -1,7 +1,7 @@ package store import ( - "bytes" + "encoding/json" "fmt" "os" "runtime/debug" @@ -17,7 +17,6 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" cmtrand "github.com/cometbft/cometbft/libs/rand" cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" @@ -34,15 +33,22 @@ type cleanupFunc func() // make an extended commit with a single vote containing just the height and a // timestamp func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit { - extCommitSigs := []types.ExtendedCommitSig{{ - CommitSig: types.CommitSig{ - BlockIDFlag: types.BlockIDFlagCommit, - ValidatorAddress: cmtrand.Bytes(crypto.AddressSize), - Timestamp: timestamp, - Signature: []byte("Signature"), - }, - ExtensionSignature: []byte("ExtensionSignature"), - }} + return makeTestExtCommitWithNumSigs(height, timestamp, 1) +} + +func makeTestExtCommitWithNumSigs(height int64, timestamp time.Time, numSigs int) *types.ExtendedCommit { + extCommitSigs := []types.ExtendedCommitSig{} + for i := 0; i < numSigs; i++ { + extCommitSigs = append(extCommitSigs, types.ExtendedCommitSig{ + CommitSig: types.CommitSig{ + BlockIDFlag: types.BlockIDFlagCommit, + ValidatorAddress: cmtrand.Bytes(crypto.AddressSize), + Timestamp: timestamp, + Signature: cmtrand.Bytes(64), + }, + ExtensionSignature: []byte("ExtensionSignature"), + }) + } return &types.ExtendedCommit{ Height: height, BlockID: types.BlockID{ @@ -53,7 +59,7 @@ func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit } } -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { +func makeStateAndBlockStore() (sm.State, *BlockStore, cleanupFunc) { config := test.ResetTestRoot("blockchain_reactor_test") // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB()) // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) @@ -87,9 +93,14 @@ func TestLoadBlockStoreState(t *testing.T) { for _, tc := range testCases { db := dbm.NewMemDB() - SaveBlockStoreState(tc.bss, db) + batch := db.NewBatch() + SaveBlockStoreStateBatch(tc.bss, batch) + err := batch.WriteSync() + require.NoError(t, err) retrBSJ := 
LoadBlockStoreState(db) assert.Equal(t, tc.want, retrBSJ, "expected the retrieved DBs to match: %s", tc.testName) + err = batch.Close() + require.NoError(t, err) } } @@ -138,7 +149,7 @@ func newInMemoryBlockStore() (*BlockStore, dbm.DB) { // TODO: This test should be simplified ... func TestBlockStoreSaveLoadBlock(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") @@ -151,10 +162,12 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } } - // save a block - block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - validPartSet, err := block.MakePartSet(2) + // save a block big enough to have two block parts + txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone + block := state.MakeBlock(bs.Height()+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address) + validPartSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) + require.GreaterOrEqual(t, validPartSet.Total(), uint32(2)) part2 := validPartSet.GetPart(1) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) @@ -209,7 +222,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { Height: 5, ChainID: "block_test", Time: cmttime.Now(), - ProposerAddress: cmtrand.Bytes(crypto.AddressSize)}, + ProposerAddress: cmtrand.Bytes(crypto.AddressSize), + }, makeTestExtCommit(5, cmttime.Now()).ToCommit(), ), parts: validPartSet, @@ -389,13 +403,13 @@ func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) - ps, err := block.MakePartSet(2) + ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) testCase.malleateCommit(seenCommit) if testCase.shouldPanic { @@ -430,12 +444,12 @@ func TestLoadBlockExtendedCommit(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) - ps, err := block.MakePartSet(2) + ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) if testCase.saveExtended { bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) @@ -464,7 +478,7 @@ func TestLoadBaseMeta(t *testing.T) { for h := int64(1); h <= 10; h++ { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) @@ -509,7 +523,7 @@ func TestLoadBlockPart(t *testing.T) { // 3. 
A good block serialized and saved to the DB should be retrievable block := state.MakeBlock(height, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) part1 := partSet.GetPart(0) @@ -520,7 +534,13 @@ func TestLoadBlockPart(t *testing.T) { gotPart, _, panicErr := doFn(loadPart) require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, res, "a properly saved block should return a proper block") - require.Equal(t, gotPart.(*types.Part), part1, + + // Having to do this because of https://github.com/stretchr/testify/issues/1141 + gotPartJSON, err := json.Marshal(gotPart.(*types.Part)) + require.NoError(t, err) + part1JSON, err := json.Marshal(part1) + require.NoError(t, err) + require.JSONEq(t, string(gotPartJSON), string(part1JSON), "expecting successful retrieval of previously saved block") } @@ -548,7 +568,7 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) @@ -677,7 +697,7 @@ func TestLoadBlockMetaByHash(t *testing.T) { bs := NewBlockStore(dbm.NewMemDB()) b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := b1.MakePartSet(2) + partSet, err := b1.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(1, cmttime.Now()) bs.SaveBlock(b1, partSet, seenCommit.ToCommit()) @@ -689,12 +709,12 @@ func TestLoadBlockMetaByHash(t *testing.T) { } func TestBlockFetchAtHeight(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index 00b5668d58d..5c7e2a1133a 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -1,12 +1,11 @@ package main import ( + "context" "encoding/hex" "fmt" "os" - "context" - cmtjson "github.com/cometbft/cometbft/libs/json" coregrpc "github.com/cometbft/cometbft/rpc/grpc" ) @@ -26,6 +25,7 @@ func main() { os.Exit(1) } + //nolint:staticcheck // SA1019: core_grpc.StartGRPCClient is deprecated: A new gRPC API will be introduced after v0.38. 
clientGRPC := coregrpc.StartGRPCClient(grpcAddr) res, err := clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes}) if err != nil { diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 3dd11ce7ead..1d3a685b2c2 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.15 +FROM golang:1.22 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 794572d8428..5a4f69539d8 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -14,7 +14,7 @@ docker: # order to build a binary with a CometBFT node in it (for built-in # ABCI testing). node: - go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/node ./node + go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/node ./node generator: go build -o build/generator ./generator diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 8992695f67c..34489a892d3 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -23,6 +23,7 @@ import ( "github.com/cometbft/cometbft/libs/protoio" cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttypes "github.com/cometbft/cometbft/types" "github.com/cometbft/cometbft/version" ) @@ -96,6 +97,17 @@ type Config struct { CheckTxDelay time.Duration `toml:"check_tx_delay"` FinalizeBlockDelay time.Duration `toml:"finalize_block_delay"` VoteExtensionDelay time.Duration `toml:"vote_extension_delay"` + + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + + // VoteExtensionsUpdateHeight configures the height at which consensus + // param VoteExtensionsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain. + VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` } func DefaultConfig(dir string) *Config { @@ -125,15 +137,33 @@ func NewApplication(cfg *Config) (*Application, error) { } // Info implements ABCI. -func (app *Application) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { +func (app *Application) Info(context.Context, *abci.RequestInfo) (*abci.ResponseInfo, error) { + height, hash := app.state.Info() return &abci.ResponseInfo{ Version: version.ABCIVersion, AppVersion: appVersion, - LastBlockHeight: int64(app.state.Height), - LastBlockAppHash: app.state.Hash, + LastBlockHeight: int64(height), + LastBlockAppHash: hash, }, nil } +func (app *Application) updateVoteExtensionEnableHeight(currentHeight int64) *cmtproto.ConsensusParams { + var params *cmtproto.ConsensusParams + if app.cfg.VoteExtensionsUpdateHeight == currentHeight { + app.logger.Info("enabling vote extensions on the fly", + "current_height", currentHeight, + "enable_height", app.cfg.VoteExtensionsEnableHeight) + params = &cmtproto.ConsensusParams{ + Abci: &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: app.cfg.VoteExtensionsEnableHeight, + }, + } + app.logger.Info("updating VoteExtensionsHeight in app_state", "height", app.cfg.VoteExtensionsEnableHeight) + app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(app.cfg.VoteExtensionsEnableHeight, 10)) + } + return params +} + // Info implements ABCI. 
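The `updateVoteExtensionEnableHeight` helper above only constructs the parameter change; it takes effect because the app returns it from `InitChain` (below) via `ConsensusParams`, and from `FinalizeBlock` via `ConsensusParamUpdates`. A stripped-down sketch of the `FinalizeBlock` side; `exampleApp` and its fields are illustrative stand-ins for the e2e app's config:

```go
package example

import (
	"context"

	abci "github.com/cometbft/cometbft/abci/types"
	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
)

// exampleApp is a stand-in; the real e2e app carries these in its Config.
type exampleApp struct {
	updateHeight int64 // height at which the param update is emitted
	enableHeight int64 // height at which vote extensions become mandatory
}

func (app *exampleApp) FinalizeBlock(
	_ context.Context, req *abci.RequestFinalizeBlock,
) (*abci.ResponseFinalizeBlock, error) {
	var params *cmtproto.ConsensusParams
	if req.Height == app.updateHeight {
		// Emitting the update here schedules vote extensions to become
		// required once the chain reaches enableHeight.
		params = &cmtproto.ConsensusParams{
			Abci: &cmtproto.ABCIParams{
				VoteExtensionsEnableHeight: app.enableHeight,
			},
		}
	}
	return &abci.ResponseFinalizeBlock{ConsensusParamUpdates: params}, nil
}
```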
func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { var err error @@ -150,7 +180,7 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(req.ConsensusParams.Abci.VoteExtensionsEnableHeight, 10)) app.logger.Info("setting initial height in app_state", "initial_height", req.InitialHeight) app.state.Set(prefixReservedKey+suffixInitialHeight, strconv.FormatInt(req.InitialHeight, 10)) - //Get validators from genesis + // Get validators from genesis if req.Validators != nil { for _, val := range req.Validators { val := val @@ -159,8 +189,12 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) } } } + + params := app.updateVoteExtensionEnableHeight(0) + resp := &abci.ResponseInitChain{ - AppHash: app.state.Hash, + ConsensusParams: params, + AppHash: app.state.GetHash(), } if resp.Validators, err = app.validatorUpdates(0); err != nil { return nil, err @@ -187,7 +221,7 @@ func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*a // FinalizeBlock implements ABCI. func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - var txs = make([]*abci.ExecTxResult, len(req.Txs)) + txs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { key, value, err := parseTx(tx) @@ -202,19 +236,32 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali txs[i] = &abci.ExecTxResult{Code: kvstore.CodeTypeOK} } + for _, ev := range req.Misbehavior { + app.logger.Info("Misbehavior. Slashing validator", + "validator_address", ev.GetValidator().Address, + "type", ev.GetType(), + "height", ev.GetHeight(), + "time", ev.GetTime(), + "total_voting_power", ev.GetTotalVotingPower(), + ) + } + valUpdates, err := app.validatorUpdates(uint64(req.Height)) if err != nil { panic(err) } + params := app.updateVoteExtensionEnableHeight(req.Height) + if app.cfg.FinalizeBlockDelay != 0 { time.Sleep(app.cfg.FinalizeBlockDelay) } return &abci.ResponseFinalizeBlock{ - TxResults: txs, - ValidatorUpdates: valUpdates, - AppHash: app.state.Finalize(), + TxResults: txs, + ValidatorUpdates: valUpdates, + AppHash: app.state.Finalize(), + ConsensusParamUpdates: params, Events: []abci.Event{ { Type: "val_updates", @@ -261,15 +308,16 @@ func (app *Application) Commit(_ context.Context, _ *abci.RequestCommit) (*abci. // Query implements ABCI. func (app *Application) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { + value, height := app.state.Query(string(req.Data)) return &abci.ResponseQuery{ - Height: int64(app.state.Height), + Height: int64(height), Key: req.Data, - Value: []byte(app.state.Get(string(req.Data))), + Value: []byte(value), }, nil } // ListSnapshots implements ABCI. -func (app *Application) ListSnapshots(_ context.Context, req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { +func (app *Application) ListSnapshots(context.Context, *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { snapshots, err := app.snapshots.List() if err != nil { panic(err) @@ -336,8 +384,8 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA // The special vote extension-generated transaction must fit within an empty block // and takes precedence over all other transactions coming from the mempool. 
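One detail of the sizing change in the function below: `req.MaxTxBytes` budgets the proto-encoded block data, not raw tx bytes, so the hunk switches from `len(tx)` to `cmttypes.ComputeProtoSizeForTxs`. A small self-contained illustration of the difference:

```go
package main

import (
	"fmt"

	cmttypes "github.com/cometbft/cometbft/types"
)

// The proto encoding of a tx adds a few bytes of field/length framing on
// top of len(tx); budgeting MaxTxBytes with raw lengths therefore
// overfills proposals, which is what PrepareProposal below corrects.
func main() {
	tx := cmttypes.Tx(make([]byte, 1000))
	fmt.Println("raw length:", len(tx))                                            // 1000
	fmt.Println("proto size:", cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{tx})) // 1000 + framing
}
```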
func (app *Application) PrepareProposal( - _ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - + _ context.Context, req *abci.RequestPrepareProposal, +) (*abci.ResponsePrepareProposal, error) { _, areExtensionsEnabled := app.checkHeightAndExtensions(true, req.Height, "PrepareProposal") txs := make([][]byte, 0, len(req.Txs)+1) @@ -354,7 +402,7 @@ func (app *Application) PrepareProposal( } extCommitHex := hex.EncodeToString(extCommitBytes) extTx := []byte(fmt.Sprintf("%s%d|%s", extTxPrefix, sum, extCommitHex)) - extTxLen := int64(len(extTx)) + extTxLen := cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{extTx}) app.logger.Info("preparing proposal with special transaction from vote extensions", "extTxLen", extTxLen) if extTxLen > req.MaxTxBytes { panic(fmt.Errorf("serious problem in the e2e app configuration; "+ @@ -376,10 +424,11 @@ func (app *Application) PrepareProposal( app.logger.Error("detected tx that should not come from the mempool", "tx", tx) continue } - if totalBytes+int64(len(tx)) > req.MaxTxBytes { + txLen := cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{tx}) + if totalBytes+txLen > req.MaxTxBytes { break } - totalBytes += int64(len(tx)) + totalBytes += txLen // Coherence: No need to call parseTx, as the check is stateless and has been performed by CheckTx txs = append(txs, tx) } @@ -493,7 +542,7 @@ func (app *Application) Rollback() error { } func (app *Application) getAppHeight() int64 { - initialHeightStr := app.state.Get(prefixReservedKey + suffixInitialHeight) + initialHeightStr, height := app.state.Query(prefixReservedKey + suffixInitialHeight) if len(initialHeightStr) == 0 { panic("initial height not set in database") } @@ -502,7 +551,7 @@ func (app *Application) getAppHeight() int64 { panic(fmt.Errorf("malformed initial height %q in database", initialHeightStr)) } - appHeight := int64(app.state.Height) + appHeight := int64(height) if appHeight == 0 { appHeight = initialHeight - 1 } @@ -638,7 +687,7 @@ func (app *Application) verifyAndSum( } cve := cmtproto.CanonicalVoteExtension{ Extension: vote.VoteExtension, - Height: currentHeight - 1, //the vote extension was signed in the previous height + Height: currentHeight - 1, // the vote extension was signed in the previous height Round: int64(extCommit.Round), ChainId: chainID, } @@ -728,7 +777,7 @@ func (app *Application) verifyExtensionTx(height int64, payload string) error { return fmt.Errorf("failed to sum and verify in process proposal: %w", err) } - //Final check that the proposer behaved correctly + // Final check that the proposer behaved correctly if int64(expSum) != sum { return fmt.Errorf("sum is not consistent with vote extension payload: %d!=%d", expSum, sum) } diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index fb0ce82b09a..79a223a4694 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -31,7 +31,7 @@ type SnapshotStore struct { // NewSnapshotStore creates a new snapshot store. 
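In `Create` below, the exported state is split into chunks with the store's `byteChunks` helper before being advertised as an `abci.Snapshot`. A freestanding sketch of fixed-size chunking in that spirit; `chunkSize` and `byteChunksExample` are assumed names and values, not the repo's:

```go
package main

import "fmt"

// chunkSize is an assumed constant for the sketch, not the repo's value.
const chunkSize = 1 << 16

// byteChunksExample splits a snapshot payload into fixed-size chunks; the
// last chunk carries the remainder.
func byteChunksExample(bz []byte) [][]byte {
	chunks := [][]byte{}
	for len(bz) > 0 {
		n := len(bz)
		if n > chunkSize {
			n = chunkSize
		}
		chunks = append(chunks, bz[:n])
		bz = bz[n:]
	}
	return chunks
}

func main() {
	fmt.Println(len(byteChunksExample(make([]byte, 3*chunkSize+1)))) // 4
}
```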
func NewSnapshotStore(dir string) (*SnapshotStore, error) { store := &SnapshotStore{dir: dir} - if err := os.MkdirAll(dir, 0755); err != nil { + if err := os.MkdirAll(dir, 0o755); err != nil { return nil, err } if err := store.loadMetadata(); err != nil { @@ -84,17 +84,17 @@ func (s *SnapshotStore) saveMetadata() error { func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { s.Lock() defer s.Unlock() - bz, err := state.Export() + bz, height, stateHash, err := state.Export() if err != nil { return abci.Snapshot{}, err } snapshot := abci.Snapshot{ - Height: state.Height, + Height: height, Format: 1, - Hash: hashItems(state.Values, state.Height), + Hash: stateHash, Chunks: byteChunks(bz), } - err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0o644) //nolint:gosec + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)), bz, 0o644) //nolint:gosec if err != nil { return abci.Snapshot{}, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index a10a5cbf39c..732bfbff482 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -17,14 +17,21 @@ const ( prevStateFileName = "prev_app_state.json" ) -// State is the application state. -type State struct { - sync.RWMutex +// Intermediate type used exclusively in serialization/deserialization of +// State, such that State need not expose any of its internal values publicly. +type serializedState struct { Height uint64 Values map[string]string Hash []byte +} + +// State is the application state. +type State struct { + sync.RWMutex + height uint64 + values map[string]string + hash []byte - // private fields aren't marshaled to disk. currentFile string // app saves current and previous state for rollback functionality previousFile string @@ -35,12 +42,12 @@ type State struct { // NewState creates a new state. func NewState(dir string, persistInterval uint64) (*State, error) { state := &State{ - Values: make(map[string]string), + values: make(map[string]string), currentFile: filepath.Join(dir, stateFileName), previousFile: filepath.Join(dir, prevStateFileName), persistInterval: persistInterval, } - state.Hash = hashItems(state.Values, state.Height) + state.hash = hashItems(state.values, state.height) err := state.load() switch { case errors.Is(err, os.ErrNotExist): @@ -66,8 +73,7 @@ func (s *State) load() error { return fmt.Errorf("failed to read state from %q: %w", s.currentFile, err) } } - err = json.Unmarshal(bz, s) - if err != nil { + if err := json.Unmarshal(bz, s); err != nil { return fmt.Errorf("invalid state data in %q: %w", s.currentFile, err) } return nil @@ -97,11 +103,39 @@ func (s *State) save() error { return os.Rename(newFile, s.currentFile) } +// GetHash provides a thread-safe way of accessing a copy of the current state +// hash. +func (s *State) GetHash() []byte { + s.RLock() + defer s.RUnlock() + hash := make([]byte, len(s.hash)) + copy(hash, s.hash) + return hash +} + +// Info returns both the height and hash simultaneously, and is used in the +// ABCI Info call. +func (s *State) Info() (uint64, []byte) { + s.RLock() + defer s.RUnlock() + height := s.height + hash := make([]byte, len(s.hash)) + copy(hash, s.hash) + return height, hash +} + // Export exports key/value pairs as JSON, used for state sync snapshots. -func (s *State) Export() ([]byte, error) { +// Additionally returns the current height and hash of the state. 
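Worth noting before `Export` below: the refactored accessors above (`GetHash`, `Info`) read under `RLock` and return copies of the hash slice, so no caller can mutate state behind the lock. The pattern, distilled into a runnable sketch (`exampleState` is illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// exampleState mirrors the shape of the refactor: private fields, and
// accessors that hand out copies rather than the internal slice.
type exampleState struct {
	sync.RWMutex
	height uint64
	hash   []byte
}

func (s *exampleState) Info() (uint64, []byte) {
	s.RLock()
	defer s.RUnlock()
	hash := make([]byte, len(s.hash))
	copy(hash, s.hash)
	return s.height, hash
}

func main() {
	s := &exampleState{height: 7, hash: []byte{0xde, 0xad}}
	h, hash := s.Info()
	hash[0] = 0 // mutating the copy does not touch s.hash
	fmt.Println(h, s.hash)
}
```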
+func (s *State) Export() ([]byte, uint64, []byte, error) { s.RLock() defer s.RUnlock() - return json.Marshal(s.Values) + bz, err := json.Marshal(s.values) + if err != nil { + return nil, 0, nil, err + } + height := s.height + stateHash := hashItems(s.values, height) + return bz, height, stateHash, nil } // Import imports key/value pairs from JSON bytes, used for InitChain.AppStateBytes and @@ -114,9 +148,9 @@ func (s *State) Import(height uint64, jsonBytes []byte) error { if err != nil { return fmt.Errorf("failed to decode imported JSON data: %w", err) } - s.Height = height - s.Values = values - s.Hash = hashItems(values, height) + s.height = height + s.values = values + s.hash = hashItems(values, height) return s.save() } @@ -124,7 +158,7 @@ func (s *State) Import(height uint64, jsonBytes []byte) error { func (s *State) Get(key string) string { s.RLock() defer s.RUnlock() - return s.Values[key] + return s.values[key] } // Set sets a value. Setting an empty value is equivalent to deleting it. @@ -132,39 +166,49 @@ func (s *State) Set(key, value string) { s.Lock() defer s.Unlock() if value == "" { - delete(s.Values, key) + delete(s.values, key) } else { - s.Values[key] = value + s.values[key] = value } } +// Query is used in the ABCI Query call, and provides both the current height +// and the value associated with the given key. +func (s *State) Query(key string) (string, uint64) { + s.RLock() + defer s.RUnlock() + height := s.height + value := s.values[key] + return value, height +} + // Finalize is called after applying a block, updating the height and returning the new app_hash func (s *State) Finalize() []byte { s.Lock() defer s.Unlock() switch { - case s.Height > 0: - s.Height++ + case s.height > 0: + s.height++ case s.initialHeight > 0: - s.Height = s.initialHeight + s.height = s.initialHeight default: - s.Height = 1 + s.height = 1 } - s.Hash = hashItems(s.Values, s.Height) - return s.Hash + s.hash = hashItems(s.values, s.height) + return s.hash } // Commit commits the current state. func (s *State) Commit() (uint64, error) { s.Lock() defer s.Unlock() - if s.persistInterval > 0 && s.Height%s.persistInterval == 0 { + if s.persistInterval > 0 && s.height%s.persistInterval == 0 { err := s.save() if err != nil { return 0, err } } - return s.Height, nil + return s.height, nil } func (s *State) Rollback() error { @@ -172,13 +216,32 @@ func (s *State) Rollback() error { if err != nil { return fmt.Errorf("failed to read state from %q: %w", s.previousFile, err) } - err = json.Unmarshal(bz, s) - if err != nil { + if err := json.Unmarshal(bz, s); err != nil { return fmt.Errorf("invalid state data in %q: %w", s.previousFile, err) } return nil } +func (s *State) UnmarshalJSON(b []byte) error { + var ss serializedState + if err := json.Unmarshal(b, &ss); err != nil { + return err + } + s.height = ss.Height + s.values = ss.Values + s.hash = ss.Hash + return nil +} + +func (s *State) MarshalJSON() ([]byte, error) { + ss := &serializedState{ + Height: s.height, + Values: s.values, + Hash: s.hash, + } + return json.Marshal(ss) +} + // hashItems hashes a set of key/value items. func hashItems(items map[string]string, height uint64) []byte { keys := make([]string, 0, len(items)) diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index cff113638f1..81794cae2c1 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,10 +1,9 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. 
# We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.20 +FROM cometbft/cometbft-db-testing:v0.12.0 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null -RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null # Set up build directory /src/cometbft ENV COMETBFT_BUILD_OPTIONS badgerdb,boltdb,cleveldb,rocksdb @@ -25,6 +24,7 @@ RUN cd test/e2e && make node && cp build/node /usr/bin/app WORKDIR /cometbft VOLUME /cometbft ENV CMTHOME=/cometbft +ENV GORACE "halt_on_error=1" EXPOSE 26656 26657 26660 6060 ENTRYPOINT ["/usr/bin/entrypoint"] diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 7733e06c5a3..bc64501d4fe 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -36,7 +36,7 @@ var ( nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} ipv6 = uniformChoice{false, true} // FIXME: grpc disabled due to https://github.com/tendermint/tendermint/issues/5439 - nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "builtin_unsync"} // "grpc" + nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "builtin_connsync"} // "grpc" nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp"} nodeBlockSyncs = uniformChoice{"v0"} // "v2" nodeStateSyncs = uniformChoice{false, true} @@ -47,7 +47,7 @@ var ( 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight), } - evidence = uniformChoice{0, 1, 10} + evidence = uniformChoice{0, 1, 10, 20, 200} abciDelays = uniformChoice{"none", "small", "large"} nodePerturbations = probSetChoice{ "disconnect": 0.1, @@ -59,8 +59,9 @@ var ( lightNodePerturbations = probSetChoice{ "upgrade": 0.3, } - voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} - voteExtensionEnabled = uniformChoice{true, false} + voteExtensionUpdateHeight = uniformChoice{int64(-1), int64(0), int64(1)} // -1: genesis, 0: InitChain, 1: (use offset) + voteExtensionEnabled = weightedChoice{true: 3, false: 1} + voteExtensionHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} ) type generateConfig struct { @@ -147,9 +148,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st manifest.VoteExtensionDelay = 100 * time.Millisecond manifest.FinalizeBlockDelay = 500 * time.Millisecond } - + manifest.VoteExtensionsUpdateHeight = voteExtensionUpdateHeight.Choose(r).(int64) + if manifest.VoteExtensionsUpdateHeight == 1 { + manifest.VoteExtensionsUpdateHeight = manifest.InitialHeight + voteExtensionHeightOffset.Choose(r).(int64) + } if voteExtensionEnabled.Choose(r).(bool) { - manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) + baseHeight := max(manifest.VoteExtensionsUpdateHeight+1, manifest.InitialHeight) + manifest.VoteExtensionsEnableHeight = baseHeight + voteExtensionHeightOffset.Choose(r).(int64) } var numSeeds, numValidators, numFulls, numLightClients int @@ -171,7 +176,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( - r, e2e.ModeSeed, 0, manifest.InitialHeight, false) + r, e2e.ModeSeed, 0, false) } // Next, we generate validators. 
We make sure a BFT quorum of validators start @@ -187,7 +192,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st } name := fmt.Sprintf("validator%02d", i) manifest.Nodes[name] = generateNode( - r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) + r, e2e.ModeValidator, startAt, i <= 2) if startAt == 0 { (*manifest.Validators)[name] = int64(30 + r.Intn(71)) @@ -216,7 +221,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st nextStartAt += 5 } manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( - r, e2e.ModeFull, startAt, manifest.InitialHeight, false) + r, e2e.ModeFull, startAt, false) } // We now set up peer discovery for nodes. Seed nodes are fully meshed with @@ -279,7 +284,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // here, since we need to know the overall network topology and startup // sequencing. func generateNode( - r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, mode e2e.Mode, startAt int64, forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Version: nodeVersions.Choose(r).(string), diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 9fded05007a..ff968d10e71 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -3,6 +3,7 @@ ipv6 = true initial_height = 1000 +vote_extensions_update_height = 1004 vote_extensions_enable_height = 1007 evidence = 5 initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } @@ -54,13 +55,15 @@ seeds = ["seed01"] database = "badgerdb" privval_protocol = "unix" persist_interval = 3 -retain_blocks = 10 +retain_blocks = 20 perturb = ["kill"] +key_type = "secp256k1" [node.validator04] persistent_peers = ["validator01"] database = "rocksdb" perturb = ["pause"] +key_type = "sr25519" [node.validator05] start_at = 1005 # Becomes part of the validator set at 1010 @@ -73,7 +76,7 @@ perturb = ["kill", "pause", "disconnect", "restart"] start_at = 1010 mode = "full" persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] -retain_blocks = 10 +retain_blocks = 20 perturb = ["restart"] [node.full02] diff --git a/test/e2e/networks_regressions/blocksync_blocked.toml b/test/e2e/networks_regressions/blocksync_blocked.toml new file mode 100644 index 00000000000..c84e610b5a8 --- /dev/null +++ b/test/e2e/networks_regressions/blocksync_blocked.toml @@ -0,0 +1,11 @@ +vote_extensions_enable_height = 1 +pbts_enable_height = 1 + +[validators] +validator01 = 67 +validator02 = 33 + +[node.validator01] + +[node.validator02] +start_at = 5 diff --git a/test/e2e/networks_regressions/evidence_fail.toml b/test/e2e/networks_regressions/evidence_fail.toml new file mode 100644 index 00000000000..7b64cf9c9e7 --- /dev/null +++ b/test/e2e/networks_regressions/evidence_fail.toml @@ -0,0 +1,16 @@ +evidence = 120 +prometheus = true +pbts_enable_height = 1 + +[validators] + validator01 = 33 + validator02 = 67 + +[node] + [node.validator01] + mode = "validator" + persistent_peers = ["validator02"] + clock_skew = "40s" + [node.validator02] + mode = "validator" + diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index c2cb871d1f1..f06ddc80d3d 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -11,30 +11,34 @@ import ( // Config is the application configuration. 
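The new vote-extension knobs flow from the manifest into the app `Config` below through their `toml` tags. A sketch of that decoding, assuming the `BurntSushi/toml` package (the loader the e2e tool actually uses may differ); `exampleConfig` is a trimmed stand-in:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// exampleConfig is a trimmed stand-in for the Config struct below, keeping
// only the fields relevant to vote extensions.
type exampleConfig struct {
	ChainID                    string `toml:"chain_id"`
	VoteExtensionsEnableHeight int64  `toml:"vote_extensions_enable_height"`
	VoteExtensionsUpdateHeight int64  `toml:"vote_extensions_update_height"`
}

func main() {
	const doc = `
chain_id = "test-chain"
vote_extensions_update_height = 1004
vote_extensions_enable_height = 1007
`
	var cfg exampleConfig
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```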
type Config struct { - ChainID string `toml:"chain_id"` - Listen string `toml:"listen"` - Protocol string `toml:"protocol"` - Dir string `toml:"dir"` - Mode string `toml:"mode"` - PersistInterval uint64 `toml:"persist_interval"` - SnapshotInterval uint64 `toml:"snapshot_interval"` - RetainBlocks uint64 `toml:"retain_blocks"` - ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` - PrivValServer string `toml:"privval_server"` - PrivValKey string `toml:"privval_key"` - PrivValState string `toml:"privval_state"` - KeyType string `toml:"key_type"` + ChainID string `toml:"chain_id"` + Listen string `toml:"listen"` + Protocol string `toml:"protocol"` + Dir string `toml:"dir"` + Mode string `toml:"mode"` + PersistInterval uint64 `toml:"persist_interval"` + SnapshotInterval uint64 `toml:"snapshot_interval"` + RetainBlocks uint64 `toml:"retain_blocks"` + ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` + PrivValServer string `toml:"privval_server"` + PrivValKey string `toml:"privval_key"` + PrivValState string `toml:"privval_state"` + KeyType string `toml:"key_type"` + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` } // App extracts out the application specific configuration parameters func (cfg *Config) App() *app.Config { return &app.Config{ - Dir: cfg.Dir, - SnapshotInterval: cfg.SnapshotInterval, - RetainBlocks: cfg.RetainBlocks, - KeyType: cfg.KeyType, - ValidatorUpdates: cfg.ValidatorUpdates, - PersistInterval: cfg.PersistInterval, + Dir: cfg.Dir, + SnapshotInterval: cfg.SnapshotInterval, + RetainBlocks: cfg.RetainBlocks, + KeyType: cfg.KeyType, + ValidatorUpdates: cfg.ValidatorUpdates, + PersistInterval: cfg.PersistInterval, + VoteExtensionsEnableHeight: cfg.VoteExtensionsEnableHeight, + VoteExtensionsUpdateHeight: cfg.VoteExtensionsUpdateHeight, } } @@ -60,7 +64,7 @@ func (cfg Config) Validate() error { switch { case cfg.ChainID == "": return errors.New("chain_id parameter is required") - case cfg.Listen == "" && cfg.Protocol != "builtin" && cfg.Protocol != "builtin_unsync": + case cfg.Listen == "" && cfg.Protocol != "builtin" && cfg.Protocol != "builtin_connsync": return errors.New("listen parameter is required") default: return nil diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 8d008c475e8..b6451e493be 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -62,7 +62,7 @@ func run(configFile string) error { if err = startSigner(cfg); err != nil { return err } - if cfg.Protocol == "builtin" || cfg.Protocol == "builtin_unsync" { + if cfg.Protocol == "builtin" || cfg.Protocol == "builtin_connsync" { time.Sleep(1 * time.Second) } } @@ -71,7 +71,7 @@ func run(configFile string) error { switch cfg.Protocol { case "socket", "grpc": err = startApp(cfg) - case "builtin", "builtin_unsync": + case "builtin", "builtin_connsync": if cfg.Mode == string(e2e.ModeLight) { err = startLightClient(cfg) } else { @@ -124,9 +124,9 @@ func startNode(cfg *Config) error { } var clientCreator proxy.ClientCreator - if cfg.Protocol == string(e2e.ProtocolBuiltinUnsync) { - clientCreator = proxy.NewUnsyncLocalClientCreator(app) - nodeLogger.Info("Using unsynchronized local client creator") + if cfg.Protocol == string(e2e.ProtocolBuiltinConnSync) { + clientCreator = proxy.NewConnSyncLocalClientCreator(app) + nodeLogger.Info("Using connection-synchronized local client creator") } else { clientCreator = proxy.NewLocalClientCreator(app) nodeLogger.Info("Using 
default (synchronized) local client creator") diff --git a/test/e2e/pkg/exec/exec.go b/test/e2e/pkg/exec/exec.go new file mode 100644 index 00000000000..b0a3228fac6 --- /dev/null +++ b/test/e2e/pkg/exec/exec.go @@ -0,0 +1,40 @@ +package exec + +import ( + "context" + "fmt" + "os" + osexec "os/exec" +) + +// Command executes a shell command. +func Command(ctx context.Context, args ...string) error { + _, err := CommandOutput(ctx, args...) + return err +} + +// CommandOutput executes a shell command and returns the command's output. +func CommandOutput(ctx context.Context, args ...string) ([]byte, error) { + //nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) + out, err := cmd.CombinedOutput() + switch err := err.(type) { + case nil: + return out, nil + case *osexec.ExitError: + return nil, fmt.Errorf("failed to run %q:\n%v", args, string(out)) + default: + return nil, err + } +} + +// CommandVerbose executes a shell command while displaying its output. +func CommandVerbose(ctx context.Context, args ...string) error { + //nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/test/e2e/pkg/infra/digitalocean/digitalocean.go b/test/e2e/pkg/infra/digitalocean/digitalocean.go new file mode 100644 index 00000000000..f30a11664bf --- /dev/null +++ b/test/e2e/pkg/infra/digitalocean/digitalocean.go @@ -0,0 +1,91 @@ +package digitalocean + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/exec" + "github.com/cometbft/cometbft/test/e2e/pkg/infra" +) + +var _ infra.Provider = (*Provider)(nil) + +// Provider implements a DigitalOcean-backed infrastructure provider. +type Provider struct { + infra.ProviderData +} + +// Noop currently. Setup is performed externally to the e2e test tool. +func (p *Provider) Setup() error { + return nil +} + +const ymlSystemd = "systemd-action.yml" + +func (p Provider) StartNodes(ctx context.Context, nodes ...*e2e.Node) error { + nodeIPs := make([]string, len(nodes)) + for i, n := range nodes { + nodeIPs[i] = n.ExternalIP.String() + } + if err := p.writePlaybook(ymlSystemd, true); err != nil { + return err + } + + return execAnsible(ctx, p.Testnet.Dir, ymlSystemd, nodeIPs) +} +func (p Provider) StopTestnet(ctx context.Context) error { + nodeIPs := make([]string, len(p.Testnet.Nodes)) + for i, n := range p.Testnet.Nodes { + nodeIPs[i] = n.ExternalIP.String() + } + + if err := p.writePlaybook(ymlSystemd, false); err != nil { + return err + } + return execAnsible(ctx, p.Testnet.Dir, ymlSystemd, nodeIPs) +} + +func (p Provider) writePlaybook(yaml string, starting bool) error { + playbook := ansibleSystemdBytes(starting) + //nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less + err := os.WriteFile(filepath.Join(p.Testnet.Dir, yaml), []byte(playbook), 0o644) + if err != nil { + return err + } + return nil +} + +// file as bytes to be written out to disk. 
+// ansibleSystemdBytes generates an Ansible playbook that starts or stops
+// the testapp systemd unit on all hosts.
+func ansibleSystemdBytes(starting bool) string {
+	startStop := "stopped"
+	if starting {
+		startStop = "started"
+	}
+	playbook := fmt.Sprintf(`- name: start/stop testapp
+  hosts: all
+  gather_facts: yes
+  vars:
+    ansible_host_key_checking: false
+
+  tasks:
+    - name: operate on the systemd-unit
+      ansible.builtin.systemd:
+        name: testappd
+        state: %s
+        enabled: yes`, startStop)
+	return playbook
+}
+
+// execAnsible runs an Ansible playbook against the given node IPs.
+func execAnsible(ctx context.Context, dir, playbook string, nodeIPs []string, args ...string) error {
+	playbook = filepath.Join(dir, playbook)
+	return exec.CommandVerbose(ctx, append(
+		[]string{"ansible-playbook", playbook, "-f", "50", "-u", "root", "--inventory", strings.Join(nodeIPs, ",") + ","},
+		args...)...)
+}
diff --git a/test/e2e/pkg/infra/docker/docker.go b/test/e2e/pkg/infra/docker/docker.go
index f1f6161ed02..c3588c0930d 100644
--- a/test/e2e/pkg/infra/docker/docker.go
+++ b/test/e2e/pkg/infra/docker/docker.go
@@ -2,19 +2,21 @@ package docker
 
 import (
 	"bytes"
+	"context"
 	"os"
 	"path/filepath"
 	"text/template"
 
 	e2e "github.com/cometbft/cometbft/test/e2e/pkg"
+	"github.com/cometbft/cometbft/test/e2e/pkg/exec"
 	"github.com/cometbft/cometbft/test/e2e/pkg/infra"
 )
 
-var _ infra.Provider = &Provider{}
+var _ infra.Provider = (*Provider)(nil)
 
 // Provider implements a docker-compose backed infrastructure provider.
 type Provider struct {
-	Testnet *e2e.Testnet
+	infra.ProviderData
 }
 
 // Setup generates the docker-compose file and write it to disk, erroring if
@@ -33,6 +35,18 @@ func (p *Provider) Setup() error {
 	return nil
 }
 
+func (p Provider) StartNodes(ctx context.Context, nodes ...*e2e.Node) error {
+	nodeNames := make([]string, len(nodes))
+	for i, n := range nodes {
+		nodeNames[i] = n.Name
+	}
+	return ExecCompose(ctx, p.Testnet.Dir, append([]string{"up", "-d"}, nodeNames...)...)
+}
+
+func (p Provider) StopTestnet(ctx context.Context) error {
+	return ExecCompose(ctx, p.Testnet.Dir, "down")
+}
+
 // dockerComposeBytes generates a Docker Compose config file for a testnet and returns the
 // file as bytes to be written out to disk.
 func dockerComposeBytes(testnet *e2e.Testnet) ([]byte, error) {
@@ -58,7 +72,7 @@ services:
       e2e: true
     container_name: {{ .Name }}
     image: {{ .Version }}
-{{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_unsync") }}
+{{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_connsync") }}
     entrypoint: /usr/bin/entrypoint-builtin
 {{- end }}
     init: true
@@ -74,7 +88,7 @@ services:
     - ./{{ .Name }}:/tendermint
     networks:
       {{ $.Name }}:
-        ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }}
+        ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .InternalIP }}
 
{{- if ne .Version $.UpgradeVersion}}
 
  {{ .Name }}_u:
    labels:
      e2e: true
    container_name: {{ .Name }}_u
    image: {{ $.UpgradeVersion }}
-{{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_unsync") }}
+{{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_connsync") }}
    entrypoint: /usr/bin/entrypoint-builtin
 {{- end }}
    init: true
@@ -98,7 +112,7 @@ services:
    - ./{{ .Name }}:/tendermint
    networks:
      {{ $.Name }}:
-        ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }}
+        ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .InternalIP }}
{{- end }}
{{end}}`)
 
@@ -112,3 +126,29 @@ services:
 	}
 	return buf.Bytes(), nil
 }
+
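Both the DigitalOcean and Docker providers now embed `infra.ProviderData`, which supplies `GetInfrastructureData` for free; a new backend only has to implement `Setup`, `StartNodes`, and `StopTestnet` (interface in `provider.go` below). A minimal sketch; `localProvider` is illustrative, not part of the patch:

```go
package local

import (
	"context"

	e2e "github.com/cometbft/cometbft/test/e2e/pkg"
	"github.com/cometbft/cometbft/test/e2e/pkg/infra"
)

// localProvider is an illustrative provider. Embedding infra.ProviderData
// promotes GetInfrastructureData, satisfying that part of the interface.
type localProvider struct {
	infra.ProviderData
}

var _ infra.Provider = (*localProvider)(nil)

func (*localProvider) Setup() error { return nil }

func (p *localProvider) StartNodes(ctx context.Context, nodes ...*e2e.Node) error {
	for _, n := range nodes {
		// A real provider would start the node here, e.g. via systemd or a
		// container runtime keyed on n.Name.
		_ = n
	}
	return nil
}

func (p *localProvider) StopTestnet(context.Context) error { return nil }
```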
+// ExecCompose runs a Docker Compose command for a testnet.
+func ExecCompose(ctx context.Context, dir string, args ...string) error {
+	return exec.Command(ctx, append(
+		[]string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")},
+		args...)...)
+}
+
+// ExecComposeOutput runs a Docker Compose command for a testnet and returns the command's output.
+func ExecComposeOutput(ctx context.Context, dir string, args ...string) ([]byte, error) {
+	return exec.CommandOutput(ctx, append(
+		[]string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")},
+		args...)...)
+}
+
+// ExecComposeVerbose runs a Docker Compose command for a testnet and displays its output.
+func ExecComposeVerbose(ctx context.Context, dir string, args ...string) error {
+	return exec.CommandVerbose(ctx, append(
+		[]string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")},
+		args...)...)
+}
+
+// Exec runs a Docker command.
+func Exec(ctx context.Context, args ...string) error {
+	return exec.Command(ctx, append([]string{"docker"}, args...)...)
+}
diff --git a/test/e2e/pkg/infra/provider.go b/test/e2e/pkg/infra/provider.go
index 03b821de384..de9077282f9 100644
--- a/test/e2e/pkg/infra/provider.go
+++ b/test/e2e/pkg/infra/provider.go
@@ -1,5 +1,11 @@
 package infra
 
+import (
+	"context"
+
+	e2e "github.com/cometbft/cometbft/test/e2e/pkg"
+)
+
 // Provider defines an API for manipulating the infrastructure of a
 // specific set of testnet infrastructure.
 type Provider interface {
@@ -7,14 +13,25 @@ type Provider interface {
 	// Setup generates any necessary configuration for the infrastructure
 	// provider during testnet setup.
 	Setup() error
-}
 
-// NoopProvider implements the provider interface by performing noops for every
-// interface method. This may be useful if the infrastructure is managed by a
-// separate process.
-type NoopProvider struct {
+	// StartNodes starts the nodes passed as parameters. A node MUST NOT
+	// be started twice before calling StopTestnet.
+	// If no nodes are passed, the whole network is started.
+	StartNodes(context.Context, ...*e2e.Node) error
+
+	// StopTestnet stops the whole network.
+	StopTestnet(context.Context) error
+
+	// GetInfrastructureData returns the provider's infrastructure data.
+	GetInfrastructureData() *e2e.InfrastructureData
 }
 
-func (NoopProvider) Setup() error { return nil }
+type ProviderData struct {
+	Testnet            *e2e.Testnet
+	InfrastructureData e2e.InfrastructureData
+}
 
-var _ Provider = NoopProvider{}
+// GetInfrastructureData returns the provider's infrastructure data.
+func (pd ProviderData) GetInfrastructureData() *e2e.InfrastructureData {
+	return &pd.InfrastructureData
+}
diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go
index 2fc0e4bac6e..22f8e9ae0cf 100644
--- a/test/e2e/pkg/infrastructure.go
+++ b/test/e2e/pkg/infrastructure.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net"
 	"os"
+	"sort"
 )
 
 const (
@@ -17,6 +18,7 @@ const (
 // InfrastructureData contains the relevant information for a set of existing
 // infrastructure that is to be used for running a testnet.
 type InfrastructureData struct {
+	Path string
 
 	// Provider is the name of infrastructure provider backing the testnet.
 	// For example, 'docker' if it is running locally in a docker network or
@@ -37,7 +39,19 @@ type InfrastructureData struct {
 
 // InstanceData contains the relevant information for a machine instance backing
 // one of the nodes in the testnet.
type InstanceData struct { - IPAddress net.IP `json:"ip_address"` + IPAddress net.IP `json:"ip_address"` + ExtIPAddress net.IP `json:"ext_ip_address"` + Port uint32 `json:"port"` +} + +func sortNodeNames(m Manifest) []string { + // Set up nodes, in alphabetical order (IPs and ports get same order). + nodeNames := []string{} + for name := range m.Nodes { + nodeNames = append(nodeNames, name) + } + sort.Strings(nodeNames) + return nodeNames } func NewDockerInfrastructureData(m Manifest) (InfrastructureData, error) { @@ -49,16 +63,22 @@ func NewDockerInfrastructureData(m Manifest) (InfrastructureData, error) { if err != nil { return InfrastructureData{}, fmt.Errorf("invalid IP network address %q: %w", netAddress, err) } + + portGen := newPortGenerator(proxyPortFirst) ipGen := newIPGenerator(ipNet) ifd := InfrastructureData{ Provider: "docker", Instances: make(map[string]InstanceData), Network: netAddress, } - for name := range m.Nodes { + localHostIP := net.ParseIP("127.0.0.1") + for _, name := range sortNodeNames(m) { ifd.Instances[name] = InstanceData{ - IPAddress: ipGen.Next(), + IPAddress: ipGen.Next(), + ExtIPAddress: localHostIP, + Port: portGen.Next(), } + } return ifd, nil } @@ -76,5 +96,6 @@ func InfrastructureDataFromFile(p string) (InfrastructureData, error) { if ifd.Network == "" { ifd.Network = globalIPv4CIDR } + ifd.Path = p return ifd, nil } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index eb841b78bf6..7fd7afaf9d9 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -49,28 +49,23 @@ type Manifest struct { Nodes map[string]*ManifestNode `toml:"node"` // KeyType sets the curve that will be used by validators. - // Options are ed25519 & secp256k1 + // Options are ed25519, secp256k1 and sr25519. KeyType string `toml:"key_type"` // Evidence indicates the amount of evidence that will be injected into the // testnet via the RPC endpoint of a random node. Default is 0 Evidence int `toml:"evidence"` - // VoteExtensionsEnableHeight configures the first height during which - // the chain will use and require vote extension data to be present - // in precommit messages. - VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` - // ABCIProtocol specifies the protocol used to communicate with the ABCI - // application: "unix", "tcp", "grpc", "builtin" or "builtin_unsync". + // application: "unix", "tcp", "grpc", "builtin" or "builtin_connsync". // // Defaults to "builtin". "builtin" will build a complete CometBFT node // into the application and launch it instead of launching a separate // CometBFT process. // - // "builtin_unsync" is basically the same as "builtin", except that it uses - // an "unsynchronized" local client creator, which attempts to replicate the - // same concurrency model locally as the socket client. + // "builtin_connsync" is basically the same as "builtin", except that it + // uses a "connection-synchronized" local client creator, which attempts to + // replicate the same concurrency model locally as the socket client. ABCIProtocol string `toml:"abci_protocol"` // Add artificial delays to each of the main ABCI calls to mimic computation time @@ -88,10 +83,35 @@ type Manifest struct { LoadTxSizeBytes int `toml:"load_tx_size_bytes"` LoadTxBatchSize int `toml:"load_tx_batch_size"` LoadTxConnections int `toml:"load_tx_connections"` + LoadMaxTxs int `toml:"load_max_txs"` + + // LogLevel specifies the log level to be set on all nodes. 
+ LogLevel string `toml:"log_level"` + + // LogFormat specifies the log format to be set on all nodes. + LogFormat string `toml:"log_format"` // Enable or disable Prometheus metrics on all nodes. // Defaults to false (disabled). Prometheus bool `toml:"prometheus"` + + // BlockMaxBytes specifies the maximum size in bytes of a block. This + // value will be written to the genesis file of all nodes. + BlockMaxBytes int64 `toml:"block_max_bytes"` + + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + + // VoteExtensionsUpdateHeight configures the height at which consensus + // param VoteExtensionsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain. + VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` + // Maximum number of peers to which the node gossips transactions + ExperimentalMaxGossipConnectionsToPersistentPeers uint `toml:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers uint `toml:"experimental_max_gossip_connections_to_non_persistent_peers"` } // ManifestNode represents a node in a testnet manifest. diff --git a/test/e2e/pkg/templates/prometheus-yaml.tmpl b/test/e2e/pkg/templates/prometheus-yaml.tmpl new file mode 100644 index 00000000000..3c7636e0ddc --- /dev/null +++ b/test/e2e/pkg/templates/prometheus-yaml.tmpl @@ -0,0 +1,9 @@ +global: + scrape_interval: 1s + +scrape_configs: +{{- range .Nodes }} + - job_name: '{{ .Name }}' + static_configs: + - targets: ['localhost:{{ .PrometheusProxyPort }}'] +{{end}} \ No newline at end of file diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index d0d5ae67295..41c5e4ae814 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,21 +1,27 @@ package e2e import ( + "bytes" "errors" "fmt" "io" "math/rand" "net" + "os" "path/filepath" - "sort" "strconv" "strings" + "text/template" "time" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" + "github.com/cometbft/cometbft/crypto/sr25519" rpchttp "github.com/cometbft/cometbft/rpc/client/http" + "github.com/cometbft/cometbft/types" + + _ "embed" ) const ( @@ -42,12 +48,12 @@ const ( ModeLight Mode = "light" ModeSeed Mode = "seed" - ProtocolBuiltin Protocol = "builtin" - ProtocolBuiltinUnsync Protocol = "builtin_unsync" - ProtocolFile Protocol = "file" - ProtocolGRPC Protocol = "grpc" - ProtocolTCP Protocol = "tcp" - ProtocolUNIX Protocol = "unix" + ProtocolBuiltin Protocol = "builtin" + ProtocolBuiltinConnSync Protocol = "builtin_connsync" + ProtocolFile Protocol = "file" + ProtocolGRPC Protocol = "grpc" + ProtocolTCP Protocol = "tcp" + ProtocolUNIX Protocol = "unix" PerturbationDisconnect Perturbation = "disconnect" PerturbationKill Perturbation = "kill" @@ -55,35 +61,42 @@ const ( PerturbationRestart Perturbation = "restart" PerturbationUpgrade Perturbation = "upgrade" - EvidenceAgeHeight int64 = 7 - EvidenceAgeTime time.Duration = 500 * time.Millisecond + EvidenceAgeHeight int64 = 14 + EvidenceAgeTime time.Duration = 1500 * time.Millisecond ) // Testnet represents a single testnet. 
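The `prometheus-yaml.tmpl` added above is embedded via `go:embed` and rendered against the testnet later in this diff (`prometheusConfigBytes`), ranging over `.Nodes` for each `.Name` and `.PrometheusProxyPort`. A freestanding sketch of that flow, with the template inlined and a mock node type standing in for `e2e.Node`:

```go
package main

import (
	"os"
	"text/template"
)

// node mocks the two fields the template reads; the real code executes the
// template against the full Testnet value.
type node struct {
	Name                string
	PrometheusProxyPort uint32
}

// Inlined for the sketch; the patch embeds the same text with //go:embed.
const tmpl = `global:
  scrape_interval: 1s

scrape_configs:
{{- range .Nodes }}
  - job_name: '{{ .Name }}'
    static_configs:
      - targets: ['localhost:{{ .PrometheusProxyPort }}']
{{end}}`

func main() {
	t := template.Must(template.New("prometheus-yaml").Parse(tmpl))
	data := struct{ Nodes []node }{[]node{{"validator01", 6060}, {"full01", 6061}}}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```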
type Testnet struct { - Name string - File string - Dir string - IP *net.IPNet - InitialHeight int64 - InitialState map[string]string - Validators map[*Node]int64 - ValidatorUpdates map[int64]map[*Node]int64 - Nodes []*Node - KeyType string - Evidence int - LoadTxSizeBytes int - LoadTxBatchSize int - LoadTxConnections int - ABCIProtocol string - PrepareProposalDelay time.Duration - ProcessProposalDelay time.Duration - CheckTxDelay time.Duration - VoteExtensionDelay time.Duration - FinalizeBlockDelay time.Duration - UpgradeVersion string - Prometheus bool - VoteExtensionsEnableHeight int64 + Name string + File string + Dir string + IP *net.IPNet + InitialHeight int64 + InitialState map[string]string + Validators map[*Node]int64 + ValidatorUpdates map[int64]map[*Node]int64 + Nodes []*Node + KeyType string + Evidence int + LoadTxSizeBytes int + LoadTxBatchSize int + LoadTxConnections int + LoadMaxTxs int + ABCIProtocol string + PrepareProposalDelay time.Duration + ProcessProposalDelay time.Duration + CheckTxDelay time.Duration + VoteExtensionDelay time.Duration + FinalizeBlockDelay time.Duration + UpgradeVersion string + LogLevel string + LogFormat string + Prometheus bool + BlockMaxBytes int64 + VoteExtensionsEnableHeight int64 + VoteExtensionsUpdateHeight int64 + ExperimentalMaxGossipConnectionsToPersistentPeers uint + ExperimentalMaxGossipConnectionsToNonPersistentPeers uint } // Node represents a CometBFT node in a testnet. @@ -94,7 +107,8 @@ type Node struct { Mode Mode PrivvalKey crypto.PrivKey NodeKey crypto.PrivKey - IP net.IP + InternalIP net.IP + ExternalIP net.IP ProxyPort uint32 StartAt int64 BlockSyncVersion string @@ -131,7 +145,6 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa dir := strings.TrimSuffix(file, filepath.Ext(file)) keyGen := newKeyGenerator(randomSeed) - proxyPortGen := newPortGenerator(proxyPortFirst) prometheusProxyPortGen := newPortGenerator(prometheusProxyPortFirst) _, ipNet, err := net.ParseCIDR(ifd.Network) if err != nil { @@ -152,6 +165,7 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa LoadTxSizeBytes: manifest.LoadTxSizeBytes, LoadTxBatchSize: manifest.LoadTxBatchSize, LoadTxConnections: manifest.LoadTxConnections, + LoadMaxTxs: manifest.LoadMaxTxs, ABCIProtocol: manifest.ABCIProtocol, PrepareProposalDelay: manifest.PrepareProposalDelay, ProcessProposalDelay: manifest.ProcessProposalDelay, @@ -159,8 +173,14 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa VoteExtensionDelay: manifest.VoteExtensionDelay, FinalizeBlockDelay: manifest.FinalizeBlockDelay, UpgradeVersion: manifest.UpgradeVersion, + LogLevel: manifest.LogLevel, + LogFormat: manifest.LogFormat, Prometheus: manifest.Prometheus, + BlockMaxBytes: manifest.BlockMaxBytes, VoteExtensionsEnableHeight: manifest.VoteExtensionsEnableHeight, + VoteExtensionsUpdateHeight: manifest.VoteExtensionsUpdateHeight, + ExperimentalMaxGossipConnectionsToPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToPersistentPeers, + ExperimentalMaxGossipConnectionsToNonPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToNonPersistentPeers, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -184,19 +204,16 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa testnet.LoadTxSizeBytes = defaultTxSizeBytes } - // Set up nodes, in alphabetical order (IPs and ports get same order). 
- nodeNames := []string{} - for name := range manifest.Nodes { - nodeNames = append(nodeNames, name) - } - sort.Strings(nodeNames) - - for _, name := range nodeNames { + for _, name := range sortNodeNames(manifest) { nodeManifest := manifest.Nodes[name] ind, ok := ifd.Instances[name] if !ok { return nil, fmt.Errorf("information for node '%s' missing from infrastructure data", name) } + extIP := ind.ExtIPAddress + if len(extIP) == 0 { + extIP = ind.IPAddress + } v := nodeManifest.Version if v == "" { v = localVersion @@ -208,8 +225,9 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa Testnet: testnet, PrivvalKey: keyGen.Generate(manifest.KeyType), NodeKey: keyGen.Generate("ed25519"), - IP: ind.IPAddress, - ProxyPort: proxyPortGen.Next(), + InternalIP: ind.IPAddress, + ExternalIP: extIP, + ProxyPort: ind.Port, Mode: ModeValidator, Database: "goleveldb", ABCIProtocol: Protocol(testnet.ABCIProtocol), @@ -335,6 +353,40 @@ func (t Testnet) Validate() error { if len(t.Nodes) == 0 { return errors.New("network has no nodes") } + if t.BlockMaxBytes > types.MaxBlockSizeBytes { + return fmt.Errorf("value of BlockMaxBytes cannot be higher than %d", types.MaxBlockSizeBytes) + } + if t.VoteExtensionsUpdateHeight < -1 { + return fmt.Errorf("value of VoteExtensionsUpdateHeight must be positive, 0 (InitChain), "+ + "or -1 (Genesis); update height %d", t.VoteExtensionsUpdateHeight) + } + if t.VoteExtensionsEnableHeight < 0 { + return fmt.Errorf("value of VoteExtensionsEnableHeight must be positive, or 0 (disable); "+ + "enable height %d", t.VoteExtensionsEnableHeight) + } + if t.VoteExtensionsUpdateHeight > 0 && t.VoteExtensionsUpdateHeight < t.InitialHeight { + return fmt.Errorf("a value of VoteExtensionsUpdateHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "update height %d, initial height %d", + t.VoteExtensionsUpdateHeight, t.InitialHeight, + ) + } + if t.VoteExtensionsEnableHeight > 0 { + if t.VoteExtensionsEnableHeight < t.InitialHeight { + return fmt.Errorf("a value of VoteExtensionsEnableHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "enable height %d, initial height %d", + t.VoteExtensionsEnableHeight, t.InitialHeight, + ) + } + if t.VoteExtensionsEnableHeight <= t.VoteExtensionsUpdateHeight { + return fmt.Errorf("a value of VoteExtensionsEnableHeight greater than 0 "+ + "must be greater than VoteExtensionsUpdateHeight; "+ + "update height %d, enable height %d", + t.VoteExtensionsUpdateHeight, t.VoteExtensionsEnableHeight, + ) + } + } for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) @@ -348,11 +400,11 @@ func (n Node) Validate(testnet Testnet) error { if n.Name == "" { return errors.New("node has no name") } - if n.IP == nil { + if n.InternalIP == nil { return errors.New("node has no IP address") } - if !testnet.IP.Contains(n.IP) { - return fmt.Errorf("node IP %v is not in testnet network %v", n.IP, testnet.IP) + if !testnet.IP.Contains(n.InternalIP) { + return fmt.Errorf("node IP %v is not in testnet network %v", n.InternalIP, testnet.IP) } if n.ProxyPort == n.PrometheusProxyPort { return fmt.Errorf("node local port %v used also for Prometheus local port", n.ProxyPort) @@ -364,7 +416,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("local port %v must be >1024", n.PrometheusProxyPort) } for _, peer := range testnet.Nodes { - if peer.Name != n.Name && peer.ProxyPort == n.ProxyPort { + if peer.Name != n.Name && 
peer.ProxyPort == n.ProxyPort && peer.ExternalIP.Equal(n.ExternalIP) { return fmt.Errorf("peer %q also has local port %v", peer.Name, n.ProxyPort) } if n.PrometheusProxyPort > 0 { @@ -384,11 +436,11 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid database setting %q", n.Database) } switch n.ABCIProtocol { - case ProtocolBuiltin, ProtocolBuiltinUnsync, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: + case ProtocolBuiltin, ProtocolBuiltinConnSync, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: default: return fmt.Errorf("invalid ABCI protocol setting %q", n.ABCIProtocol) } - if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin && n.ABCIProtocol != ProtocolBuiltinUnsync { + if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin && n.ABCIProtocol != ProtocolBuiltinConnSync { return errors.New("light client must use builtin protocol") } switch n.PrivvalProtocol { @@ -483,10 +535,38 @@ func (t Testnet) HasPerturbations() bool { return false } +//go:embed templates/prometheus-yaml.tmpl +var prometheusYamlTemplate string + +func (t Testnet) prometheusConfigBytes() ([]byte, error) { + tmpl, err := template.New("prometheus-yaml").Parse(prometheusYamlTemplate) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, t) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (t Testnet) WritePrometheusConfig() error { + bytes, err := t.prometheusConfigBytes() + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(t.Dir, "prometheus.yaml"), bytes, 0o644) //nolint:gosec + if err != nil { + return err + } + return nil +} + // Address returns a P2P endpoint address for the node. func (n Node) AddressP2P(withID bool) string { - ip := n.IP.String() - if n.IP.To4() == nil { + ip := n.InternalIP.String() + if n.InternalIP.To4() == nil { // IPv6 addresses must be wrapped in [] to avoid conflict with : port separator ip = fmt.Sprintf("[%v]", ip) } @@ -499,8 +579,8 @@ func (n Node) AddressP2P(withID bool) string { // Address returns an RPC endpoint address for the node. func (n Node) AddressRPC() string { - ip := n.IP.String() - if n.IP.To4() == nil { + ip := n.InternalIP.String() + if n.InternalIP.To4() == nil { // IPv6 addresses must be wrapped in [] to avoid conflict with : port separator ip = fmt.Sprintf("[%v]", ip) } @@ -509,7 +589,7 @@ func (n Node) AddressRPC() string { // Client returns an RPC client for a node. 
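// With the InternalIP/ExternalIP split, the client dials the node's external
// address rather than the hardcoded loopback, so the runner also works against
// remotely provisioned nodes. A hypothetical caller:
//
//	c, err := node.Client()
//	if err != nil {
//		return err
//	}
//	status, err := c.Status(ctx) // e.g. query the node's sync state over RPC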
func (n Node) Client() (*rpchttp.HTTP, error) {
-	return rpchttp.New(fmt.Sprintf("http://127.0.0.1:%v", n.ProxyPort), "/websocket")
+	return rpchttp.New(fmt.Sprintf("http://%s:%v", n.ExternalIP, n.ProxyPort), "/websocket")
 }
 
 // Stateless returns true if the node is either a seed node or a light node
@@ -538,6 +618,8 @@ func (g *keyGenerator) Generate(keyType string) crypto.PrivKey {
 	switch keyType {
 	case "secp256k1":
 		return secp256k1.GenPrivKeySecp256k1(seed)
+	case "sr25519":
+		return sr25519.GenPrivKeyFromSecret(seed)
 	case "", "ed25519":
 		return ed25519.GenPrivKeyFromSecret(seed)
 	default:
diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go
index 735a451fb87..852612312be 100644
--- a/test/e2e/runner/cleanup.go
+++ b/test/e2e/runner/cleanup.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -8,6 +9,8 @@ import (
 
 	"github.com/cometbft/cometbft/libs/log"
 	e2e "github.com/cometbft/cometbft/test/e2e/pkg"
+	"github.com/cometbft/cometbft/test/e2e/pkg/exec"
+	"github.com/cometbft/cometbft/test/e2e/pkg/infra/docker"
 )
 
 // Cleanup removes the Docker Compose containers and testnet directory.
@@ -32,13 +35,13 @@ func cleanupDocker() error {
 	// does this by default. Ugly, but works.
 	xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)`
 
-	err := exec("bash", "-c", fmt.Sprintf(
+	err := exec.Command(context.Background(), "bash", "-c", fmt.Sprintf(
 		"docker container ls -qa --filter label=e2e | xargs %v docker container rm -f",
 		xargsR))
 	if err != nil {
 		return err
 	}
 
-	err = exec("bash", "-c", fmt.Sprintf(
+	err = exec.Command(context.Background(), "bash", "-c", fmt.Sprintf(
 		"docker network ls -q --filter label=e2e | xargs %v docker network rm",
 		xargsR))
 	if err != nil {
 		return err
@@ -69,7 +72,7 @@ func cleanupDir(dir string) error {
 	if err != nil {
 		return err
 	}
-	err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir),
+	err = docker.Exec(context.Background(), "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir),
 		"cometbft/e2e-node", "sh", "-c", "rm -rf /network/*/")
 	if err != nil {
 		return err
diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go
index 42646adfb14..b0c723454f9 100644
--- a/test/e2e/runner/evidence.go
+++ b/test/e2e/runner/evidence.go
@@ -25,7 +25,7 @@ import (
 // 1 in 4 evidence is light client evidence, the rest is duplicate vote evidence
 const lightClientEvidenceRatio = 4
 
-// InjectEvidence takes a running testnet and generates an amount of valid
+// InjectEvidence takes a running testnet and generates a given amount of valid and invalid
 // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`.
 // Evidence is random and can be a mixture of LightClientAttackEvidence and
 // DuplicateVoteEvidence.
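// With lightClientEvidenceRatio = 4, the loop below emits light client
// evidence at i = 0, 4, 8, ... and duplicate vote evidence otherwise; every
// second light client evidence (i%8 == 0, i.e. i = 0, 8, ...) is deliberately
// made invalid, and each invalid submission bumps `amount` so that the
// requested number of valid, committable evidence is still produced.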
@@ -88,15 +88,17 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo
 	}
 
 	var ev types.Evidence
-	for i := 1; i <= amount; i++ {
+	for i := 0; i < amount; i++ {
+		validEv := true
 		if i%lightClientEvidenceRatio == 0 {
+			validEv = i%(lightClientEvidenceRatio*2) != 0 // Alternate valid and invalid evidence
 			ev, err = generateLightClientAttackEvidence(
-				ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time,
+				ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, validEv,
 			)
 		} else {
 			var dve *types.DuplicateVoteEvidence
 			dve, err = generateDuplicateVoteEvidence(
-				ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time,
+				privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time,
 			)
 			if dve.VoteA.Height < testnet.VoteExtensionsEnableHeight {
 				dve.VoteA.Extension = nil
@@ -111,9 +113,18 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo
 	}
 
 	_, err := client.BroadcastEvidence(ctx, ev)
-	if err != nil {
+	if !validEv {
+		// The tests will count committed evidence later on,
+		// and only valid evidence will make it into a block
+		amount++
+	}
+	if validEv != (err == nil) {
+		if err == nil {
+			return errors.New("submitting invalid evidence didn't return an error")
+		}
 		return err
 	}
+	time.Sleep(5 * time.Second / time.Duration(amount))
 	}
 
 	// wait for the node to reach the height above the forged height so that
@@ -156,6 +167,7 @@ func generateLightClientAttackEvidence(
 	vals *types.ValidatorSet,
 	chainID string,
 	evTime time.Time,
+	validEvidence bool,
 ) (*types.LightClientAttackEvidence, error) {
 	// forge a random header
 	forgedHeight := height + 2
@@ -165,7 +177,7 @@ func generateLightClientAttackEvidence(
 	// add a new bogus validator and remove an existing one to
 	// vary the validator set slightly
-	pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals)
+	pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals, !validEvidence)
 	if err != nil {
 		return nil, err
 	}
@@ -180,6 +192,11 @@ func generateLightClientAttackEvidence(
 		return nil, err
 	}
 
+	// malleate the last signature of the commit by adding one to its first byte
+	if !validEvidence {
+		commit.Signatures[len(commit.Signatures)-1].Signature[0]++
+	}
+
 	ev := &types.LightClientAttackEvidence{
 		ConflictingBlock: &types.LightBlock{
 			SignedHeader: &types.SignedHeader{
@@ -201,7 +218,6 @@
 // generateDuplicateVoteEvidence picks a random validator from the val set and
 // returns duplicate vote evidence against the validator
 func generateDuplicateVoteEvidence(
-	ctx context.Context,
 	privVals []types.MockPV,
 	height int64,
 	vals *types.ValidatorSet,
@@ -294,7 +310,11 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc
 	}
 }
 
-func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *types.ValidatorSet,
+func mutateValidatorSet(
+	ctx context.Context,
+	privVals []types.MockPV,
+	vals *types.ValidatorSet,
+	nop bool,
 ) ([]types.PrivValidator, *types.ValidatorSet, error) {
 	newVal, newPrivVal, err := test.Validator(ctx, 10)
 	if err != nil {
@@ -302,10 +322,14 @@ func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *type
 	}
 
 	var newVals *types.ValidatorSet
-	if vals.Size() > 2 {
-		newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal))
+	if nop {
+		newVals = types.NewValidatorSet(vals.Copy().Validators)
 	} else {
-		newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal))
+		if vals.Size() > 
2 { + newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + } else { + newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + } } // we need to sort the priv validators with the same index as the validator set diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go deleted file mode 100644 index 38b758360a8..00000000000 --- a/test/e2e/runner/exec.go +++ /dev/null @@ -1,60 +0,0 @@ -package main - -import ( - "fmt" - "os" - osexec "os/exec" - "path/filepath" -) - -// execute executes a shell command. -func exec(args ...string) error { - _, err := execOutput(args...) - return err -} - -func execOutput(args ...string) ([]byte, error) { - cmd := osexec.Command(args[0], args[1:]...) //nolint:gosec - out, err := cmd.CombinedOutput() - switch err := err.(type) { - case nil: - return out, nil - case *osexec.ExitError: - return nil, fmt.Errorf("failed to run %q:\n%v", args, string(out)) - default: - return nil, err - } -} - -// execVerbose executes a shell command while displaying its output. -func execVerbose(args ...string) error { - cmd := osexec.Command(args[0], args[1:]...) //nolint:gosec - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} - -// execCompose runs a Docker Compose command for a testnet. -func execCompose(dir string, args ...string) error { - return exec(append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -func execComposeOutput(dir string, args ...string) ([]byte, error) { - return execOutput(append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. -func execComposeVerbose(dir string, args ...string) error { - return execVerbose(append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execDocker runs a Docker command. -func execDocker(args ...string) error { - return exec(append([]string{"docker"}, args...)...) 
-} diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 3bb211c5547..bd7744dfc91 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -50,6 +50,11 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { select { case <-chSuccess: success++ + if testnet.LoadMaxTxs > 0 && success >= testnet.LoadMaxTxs { + logger.Info("load", "msg", log.NewLazySprintf("Ending transaction load after reaching %v txs (%.1f tx/s)...", + success, float64(success)/time.Since(started).Seconds())) + return nil + } timeout = stallTimeout case <-time.After(timeout): return fmt.Errorf("unable to submit transactions for %v", timeout) diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 901b6d850e7..23c14a1833d 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -13,6 +13,7 @@ import ( "github.com/cometbft/cometbft/libs/log" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/infra" + "github.com/cometbft/cometbft/test/e2e/pkg/infra/digitalocean" "github.com/cometbft/cometbft/test/e2e/pkg/infra/docker" ) @@ -85,9 +86,23 @@ func NewCLI() *CLI { } cli.testnet = testnet - cli.infp = &infra.NoopProvider{} - if inft == "docker" { - cli.infp = &docker.Provider{Testnet: testnet} + switch inft { + case "docker": + cli.infp = &docker.Provider{ + ProviderData: infra.ProviderData{ + Testnet: testnet, + InfrastructureData: ifd, + }, + } + case "digital-ocean": + cli.infp = &digitalocean.Provider{ + ProviderData: infra.ProviderData{ + Testnet: testnet, + InfrastructureData: ifd, + }, + } + default: + return fmt.Errorf("bad infrastructure type: %s", inft) } return nil }, @@ -112,7 +127,7 @@ func NewCLI() *CLI { chLoadResult <- err }() - if err := Start(cmd.Context(), cli.testnet); err != nil { + if err := Start(cmd.Context(), cli.testnet, cli.infp); err != nil { return err } @@ -145,7 +160,7 @@ func NewCLI() *CLI { if err := Wait(cmd.Context(), cli.testnet, 5); err != nil { // wait for network to settle before tests return err } - if err := Test(cli.testnet); err != nil { + if err := Test(cli.testnet, cli.infp.GetInfrastructureData()); err != nil { return err } if !cli.preserve { @@ -177,7 +192,7 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "start", - Short: "Starts the Docker testnet, waiting for nodes to become available", + Short: "Starts the testnet, waiting for nodes to become available", RunE: func(cmd *cobra.Command, args []string) error { _, err := os.Stat(cli.testnet.Dir) if os.IsNotExist(err) { @@ -186,13 +201,13 @@ func NewCLI() *CLI { if err != nil { return err } - return Start(cmd.Context(), cli.testnet) + return Start(cmd.Context(), cli.testnet, cli.infp) }, }) cli.root.AddCommand(&cobra.Command{ Use: "perturb", - Short: "Perturbs the Docker testnet, e.g. by restarting or disconnecting nodes", + Short: "Perturbs the testnet, e.g. 
by restarting or disconnecting nodes", RunE: func(cmd *cobra.Command, args []string) error { return Perturb(cmd.Context(), cli.testnet) }, @@ -208,10 +223,10 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "stop", - Short: "Stops the Docker testnet", + Short: "Stops the testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Stopping testnet") - return execCompose(cli.testnet.Dir, "down") + return cli.infp.StopTestnet(context.Background()) }, }) @@ -250,7 +265,7 @@ func NewCLI() *CLI { Use: "test", Short: "Runs test cases against a running testnet", RunE: func(cmd *cobra.Command, args []string) error { - return Test(cli.testnet) + return Test(cli.testnet, cli.infp.GetInfrastructureData()) }, }) @@ -266,7 +281,7 @@ func NewCLI() *CLI { Use: "logs", Short: "Shows the testnet logs", RunE: func(cmd *cobra.Command, args []string) error { - return execComposeVerbose(cli.testnet.Dir, "logs") + return docker.ExecComposeVerbose(context.Background(), cli.testnet.Dir, "logs") }, }) @@ -274,7 +289,7 @@ func NewCLI() *CLI { Use: "tail", Short: "Tails the testnet logs", RunE: func(cmd *cobra.Command, args []string) error { - return execComposeVerbose(cli.testnet.Dir, "logs", "--follow") + return docker.ExecComposeVerbose(context.Background(), cli.testnet.Dir, "logs", "--follow") }, }) @@ -309,7 +324,7 @@ Does not run any perturbations. chLoadResult <- err }() - if err := Start(cmd.Context(), cli.testnet); err != nil { + if err := Start(cmd.Context(), cli.testnet, cli.infp); err != nil { return err } @@ -327,11 +342,7 @@ Does not run any perturbations. return err } - if err := Cleanup(cli.testnet); err != nil { - return err - } - - return nil + return Cleanup(cli.testnet) }, }) diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index f407854997c..db8de63de0b 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -8,6 +8,7 @@ import ( "github.com/cometbft/cometbft/libs/log" rpctypes "github.com/cometbft/cometbft/rpc/core/types" e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/infra/docker" ) // Perturbs a running testnet. @@ -28,7 +29,7 @@ func Perturb(ctx context.Context, testnet *e2e.Testnet) error { // after recovering. 
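// Each perturbation below translates to plain Docker (Compose) commands,
// roughly:
//
//	disconnect -> docker network disconnect, reconnect 10s later
//	kill       -> docker compose kill -s SIGKILL, then start
//	pause      -> docker compose pause, unpause 10s later
//	restart    -> docker compose restart
//	upgrade    -> docker compose stop, then up -d <node>_u 10s later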
func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { testnet := node.Testnet - out, err := execComposeOutput(testnet.Dir, "ps", "-q", node.Name) + out, err := docker.ExecComposeOutput(context.Background(), testnet.Dir, "ps", "-q", node.Name) if err != nil { return nil, err } @@ -45,36 +46,36 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat switch perturbation { case e2e.PerturbationDisconnect: logger.Info("perturb node", "msg", log.NewLazySprintf("Disconnecting node %v...", node.Name)) - if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, name); err != nil { + if err := docker.Exec(context.Background(), "network", "disconnect", testnet.Name+"_"+testnet.Name, name); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, name); err != nil { + if err := docker.Exec(context.Background(), "network", "connect", testnet.Name+"_"+testnet.Name, name); err != nil { return nil, err } case e2e.PerturbationKill: logger.Info("perturb node", "msg", log.NewLazySprintf("Killing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "kill", "-s", "SIGKILL", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "kill", "-s", "SIGKILL", name); err != nil { return nil, err } - if err := execCompose(testnet.Dir, "start", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "start", name); err != nil { return nil, err } case e2e.PerturbationPause: logger.Info("perturb node", "msg", log.NewLazySprintf("Pausing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "pause", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "pause", name); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "unpause", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "unpause", name); err != nil { return nil, err } case e2e.PerturbationRestart: logger.Info("perturb node", "msg", log.NewLazySprintf("Restarting node %v...", node.Name)) - if err := execCompose(testnet.Dir, "restart", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "restart", name); err != nil { return nil, err } @@ -95,11 +96,11 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat log.NewLazySprintf("Upgrading node %v from version '%v' to version '%v'...", node.Name, oldV, newV)) - if err := execCompose(testnet.Dir, "stop", name); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "stop", name); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "up", "-d", name+"_u"); err != nil { + if err := docker.ExecCompose(context.Background(), testnet.Dir, "up", "-d", name+"_u"); err != nil { return nil, err } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 4fc57252aa5..25d9a70b6a3 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -116,6 +116,12 @@ func Setup(testnet *e2e.Testnet, infp infra.Provider) error { )).Save() } + if testnet.Prometheus { + if err := testnet.WritePrometheusConfig(); err != nil { + return err + } + } + return nil } @@ -131,7 +137,12 @@ func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { genesis.ConsensusParams.Version.App = 1 
genesis.ConsensusParams.Evidence.MaxAgeNumBlocks = e2e.EvidenceAgeHeight genesis.ConsensusParams.Evidence.MaxAgeDuration = e2e.EvidenceAgeTime - genesis.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testnet.VoteExtensionsEnableHeight + if testnet.BlockMaxBytes != 0 { + genesis.ConsensusParams.Block.MaxBytes = testnet.BlockMaxBytes + } + if testnet.VoteExtensionsUpdateHeight == -1 { + genesis.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testnet.VoteExtensionsEnableHeight + } for validator, power := range testnet.Validators { genesis.Validators = append(genesis.Validators, types.GenesisValidator{ Name: validator.Name, @@ -167,6 +178,8 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second cfg.BlockSync.Version = node.BlockSyncVersion + cfg.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToNonPersistentPeers) + cfg.Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToPersistentPeers) switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -176,7 +189,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { case e2e.ProtocolGRPC: cfg.ProxyApp = AppAddressTCP cfg.ABCI = "grpc" - case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinUnsync: + case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinConnSync: cfg.ProxyApp = "" cfg.ABCI = "" default: @@ -242,6 +255,14 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.P2P.PersistentPeers += peer.AddressP2P(true) } + if node.Testnet.LogLevel != "" { + cfg.LogLevel = node.Testnet.LogLevel + } + + if node.Testnet.LogFormat != "" { + cfg.LogFormat = node.Testnet.LogFormat + } + if node.Prometheus { cfg.Instrumentation.Prometheus = true } @@ -252,21 +273,22 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { // MakeAppConfig generates an ABCI application config for a node. 
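// An abridged example of the TOML this produces, including the new
// vote-extension heights handed through to the test app (values are
// illustrative only):
//
//	chain_id = "ci"
//	dir = "data/app"
//	protocol = "builtin"
//	vote_extensions_enable_height = 4
//	vote_extensions_update_height = 0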
func MakeAppConfig(node *e2e.Node) ([]byte, error) { cfg := map[string]interface{}{ - "chain_id": node.Testnet.Name, - "dir": "data/app", - "listen": AppAddressUNIX, - "mode": node.Mode, - "proxy_port": node.ProxyPort, - "protocol": "socket", - "persist_interval": node.PersistInterval, - "snapshot_interval": node.SnapshotInterval, - "retain_blocks": node.RetainBlocks, - "key_type": node.PrivvalKey.Type(), - "prepare_proposal_delay": node.Testnet.PrepareProposalDelay, - "process_proposal_delay": node.Testnet.ProcessProposalDelay, - "check_tx_delay": node.Testnet.CheckTxDelay, - "vote_extension_delay": node.Testnet.VoteExtensionDelay, - "finalize_block_delay": node.Testnet.FinalizeBlockDelay, + "chain_id": node.Testnet.Name, + "dir": "data/app", + "listen": AppAddressUNIX, + "mode": node.Mode, + "protocol": "socket", + "persist_interval": node.PersistInterval, + "snapshot_interval": node.SnapshotInterval, + "retain_blocks": node.RetainBlocks, + "key_type": node.PrivvalKey.Type(), + "prepare_proposal_delay": node.Testnet.PrepareProposalDelay, + "process_proposal_delay": node.Testnet.ProcessProposalDelay, + "check_tx_delay": node.Testnet.CheckTxDelay, + "vote_extension_delay": node.Testnet.VoteExtensionDelay, + "finalize_block_delay": node.Testnet.FinalizeBlockDelay, + "vote_extensions_enable_height": node.Testnet.VoteExtensionsEnableHeight, + "vote_extensions_update_height": node.Testnet.VoteExtensionsUpdateHeight, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -276,7 +298,7 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { case e2e.ProtocolGRPC: cfg["listen"] = AppAddressTCP cfg["protocol"] = "grpc" - case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinUnsync: + case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinConnSync: delete(cfg, "listen") cfg["protocol"] = string(node.ABCIProtocol) default: diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 491088da691..7a223aa33cb 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -8,9 +8,10 @@ import ( "github.com/cometbft/cometbft/libs/log" e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/infra" ) -func Start(ctx context.Context, testnet *e2e.Testnet) error { +func Start(ctx context.Context, testnet *e2e.Testnet, p infra.Provider) error { if len(testnet.Nodes) == 0 { return fmt.Errorf("no nodes in testnet") } @@ -41,19 +42,35 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { // Start initial nodes (StartAt: 0) logger.Info("Starting initial network nodes...") + nodesAtZero := make([]*e2e.Node, 0) for len(nodeQueue) > 0 && nodeQueue[0].StartAt == 0 { - node := nodeQueue[0] + nodesAtZero = append(nodesAtZero, nodeQueue[0]) nodeQueue = nodeQueue[1:] - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { - return err - } + } + err := p.StartNodes(context.Background(), nodesAtZero...) 
+ if err != nil { + return err + } + for _, node := range nodesAtZero { if _, err := waitForNode(ctx, node, 0, 15*time.Second); err != nil { return err } if node.PrometheusProxyPort > 0 { - logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v; with Prometheus on http://127.0.0.1:%v/metrics", node.Name, node.ProxyPort, node.PrometheusProxyPort)) + logger.Info("start", "msg", + log.NewLazySprintf("Node %v up on http://%s:%v; with Prometheus on http://%s:%v/metrics", + node.Name, + node.ExternalIP, + node.ProxyPort, + node.ExternalIP, + node.PrometheusProxyPort, + ), + ) } else { - logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) + logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v", + node.Name, + node.ExternalIP, + node.ProxyPort, + )) } } @@ -102,15 +119,16 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { logger.Info("Starting catch up node", "node", node.Name, "height", node.StartAt) - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + err := p.StartNodes(context.Background(), node) + if err != nil { return err } status, err := waitForNode(ctx, node, node.StartAt, 3*time.Minute) if err != nil { return err } - logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v at height %v", - node.Name, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) + logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v at height %v", + node.Name, node.ExternalIP, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) } return nil diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index f39e2a0da55..cea4441ee43 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -1,19 +1,31 @@ package main import ( + "context" "os" e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/exec" ) // Test runs test cases under tests/ -func Test(testnet *e2e.Testnet) error { +func Test(testnet *e2e.Testnet, ifd *e2e.InfrastructureData) error { logger.Info("Running tests in ./tests/...") err := os.Setenv("E2E_MANIFEST", testnet.File) if err != nil { return err } + if p := ifd.Path; p != "" { + err = os.Setenv("INFRASTRUCTURE_FILE", p) + if err != nil { + return err + } + } + err = os.Setenv("INFRASTRUCTURE_TYPE", ifd.Provider) + if err != nil { + return err + } - return execVerbose("go", "test", "-count", "1", "./tests/...") + return exec.CommandVerbose(context.Background(), "go", "test", "-count", "1", "./tests/...") } diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index f1669e747c3..536d4014fc0 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -73,7 +73,11 @@ func loadTestnet(t *testing.T) e2e.Testnet { if !filepath.IsAbs(manifestFile) { manifestFile = filepath.Join("..", manifestFile) } - + ifdType := os.Getenv("INFRASTRUCTURE_TYPE") + ifdFile := os.Getenv("INFRASTRUCTURE_FILE") + if ifdType != "docker" && ifdFile == "" { + t.Fatalf("INFRASTRUCTURE_FILE not set and INFRASTRUCTURE_TYPE is not 'docker'") + } testnetCacheMtx.Lock() defer testnetCacheMtx.Unlock() if testnet, ok := testnetCache[manifestFile]; ok { @@ -81,7 +85,17 @@ func loadTestnet(t *testing.T) e2e.Testnet { } m, err := e2e.LoadManifest(manifestFile) require.NoError(t, err) - ifd, err := e2e.NewDockerInfrastructureData(m) + + var ifd e2e.InfrastructureData + switch ifdType { + case "docker": + ifd, err = e2e.NewDockerInfrastructureData(m) + 
require.NoError(t, err) + case "digital-ocean": + ifd, err = e2e.InfrastructureDataFromFile(ifdFile) + require.NoError(t, err) + default: + } require.NoError(t, err) testnet, err := e2e.LoadTestnet(manifestFile, ifd) diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index bdb71d83dee..ab1d6705c63 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -34,7 +34,7 @@ func TestNet_Peers(t *testing.T) { for _, peerInfo := range netInfo.Peers { peer := node.Testnet.LookupNode(peerInfo.NodeInfo.Moniker) require.NotNil(t, peer, "unknown node %v", peerInfo.NodeInfo.Moniker) - require.Equal(t, peer.IP.String(), peerInfo.RemoteIP, + require.Equal(t, peer.InternalIP.String(), peerInfo.RemoteIP, "unexpected IP address for peer %v", peer.Name) seen[peerInfo.NodeInfo.Moniker] = true } diff --git a/test/fuzz/README.md b/test/fuzz/README.md index 61d07cad8c3..601d1760666 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -1,7 +1,7 @@ # fuzz Fuzzing for various packages in Tendermint using the fuzzing infrastructure -included in Go 1.20. +included in Go 1.21. Inputs: diff --git a/test/fuzz/tests/mempool_test.go b/test/fuzz/tests/mempool_test.go index 65dff8fbcd9..a1b08d83577 100644 --- a/test/fuzz/tests/mempool_test.go +++ b/test/fuzz/tests/mempool_test.go @@ -1,4 +1,4 @@ -//go:build gofuzz || go1.20 +//go:build gofuzz || go1.21 package tests diff --git a/test/fuzz/tests/p2p_secretconnection_test.go b/test/fuzz/tests/p2p_secretconnection_test.go index f61fa14d9c8..f16ce964a99 100644 --- a/test/fuzz/tests/p2p_secretconnection_test.go +++ b/test/fuzz/tests/p2p_secretconnection_test.go @@ -1,4 +1,4 @@ -//go:build gofuzz || go1.20 +//go:build gofuzz || go1.21 package tests diff --git a/test/fuzz/tests/rpc_jsonrpc_server_test.go b/test/fuzz/tests/rpc_jsonrpc_server_test.go index db6c0a2090e..846ab2db9ae 100644 --- a/test/fuzz/tests/rpc_jsonrpc_server_test.go +++ b/test/fuzz/tests/rpc_jsonrpc_server_test.go @@ -1,4 +1,4 @@ -//go:build gofuzz || go1.20 +//go:build gofuzz || go1.21 package tests diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index bd533be00fe..32ee960f915 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -2,6 +2,7 @@ package report import ( "math" + "sort" "sync" "time" @@ -112,6 +113,13 @@ func (rs *Reports) calculateAll() { r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil))) rs.l = append(rs.l, r) } + sort.Slice(rs.l, func(i, j int) bool { + if rs.l[i].Connections == rs.l[j].Connections { + return rs.l[i].Rate < rs.l[j].Rate + } + return rs.l[i].Connections < rs.l[j].Connections + }) + } func (rs *Reports) addError() { diff --git a/tools/README.md b/tools/README.md deleted file mode 100644 index 98e36c86887..00000000000 --- a/tools/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# tools - -Tools for working with CometBFT and associated technologies. -Documentation for these tools can be found online in the -[CometBFT tools documentation](https://docs.cometbft.com/main/tools/). 
diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile deleted file mode 100644 index bd2d486a33f..00000000000 --- a/tools/proto/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM bufbuild/buf:latest as buf - -FROM golang:1.14-alpine3.11 as builder - -RUN apk add --update --no-cache build-base curl git upx && \ - rm -rf /var/cache/apk/* - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGOPROTO_VERSION=1.4.1 - -RUN GO111MODULE=on go get \ - github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \ - github.com/cosmos/gogoproto/protoc-gen-gogo@v${GOGOPROTO_VERSION} \ - github.com/cosmos/gogoproto/protoc-gen-gogofaster@v${GOGOPROTO_VERSION} && \ - mv /go/bin/protoc-gen-go* /usr/local/bin/ - - -FROM alpine:edge - -WORKDIR /work - -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \ - apk add --update --no-cache clang && \ - rm -rf /var/cache/apk/* - -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --from=buf /usr/local/bin /usr/local/bin diff --git a/tools/tools.go b/tools/tools.go deleted file mode 100644 index 23d2366bb99..00000000000 --- a/tools/tools.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build tools - -// This file uses the recommended method for tracking developer tools in a go module. -// -// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module - -package tools - -import ( - _ "github.com/bufbuild/buf/cmd/buf" - _ "github.com/golangci/golangci-lint/cmd/golangci-lint" - _ "github.com/pointlander/peg" - _ "github.com/vektra/mockery/v2" -) diff --git a/types/block.go b/types/block.go index 0dadd571b9f..643038becc4 100644 --- a/types/block.go +++ b/types/block.go @@ -43,10 +43,11 @@ const ( type Block struct { mtx cmtsync.Mutex - Header `json:"header"` - Data `json:"data"` - Evidence EvidenceData `json:"evidence"` - LastCommit *Commit `json:"last_commit"` + verifiedHash cmtbytes.HexBytes // Verified block hash (not included in the struct hash) + Header `json:"header"` + Data `json:"data"` + Evidence EvidenceData `json:"evidence"` + LastCommit *Commit `json:"last_commit"` } // ValidateBasic performs basic validation that doesn't involve state data. @@ -130,8 +131,13 @@ func (b *Block) Hash() cmtbytes.HexBytes { if b.LastCommit == nil { return nil } + if b.verifiedHash != nil { + return b.verifiedHash + } b.fillHeader() - return b.Header.Hash() + hash := b.Header.Hash() + b.verifiedHash = hash + return hash } // MakePartSet returns a PartSet containing parts of a serialized block. @@ -294,8 +300,7 @@ func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 { } // MaxDataBytesNoEvidence returns the maximum size of block's data when -// evidence count is unknown. MaxEvidencePerBlock will be used for the size -// of evidence. +// evidence count is unknown (will be assumed to be 0). // // XXX: Panics on negative result. func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { @@ -306,7 +311,7 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { if maxDataBytes < 0 { panic(fmt.Sprintf( - "Negative MaxDataBytesUnknownEvidence. Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d", + "Negative MaxDataBytesNoEvidence. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d", maxBytes, -(maxDataBytes - maxBytes), )) @@ -321,7 +326,7 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { // NOTE: changes to the Header should be duplicated in: // - header.Hash() // - abci.Header -// - https://github.com/cometbft/cometbft/blob/main/spec/blockchain/blockchain.md +// - https://github.com/cometbft/cometbft/blob/v0.38.x/spec/blockchain/blockchain.md type Header struct { // basic block info Version cmtversion.Consensus `json:"version"` @@ -850,6 +855,15 @@ type Commit struct { hash cmtbytes.HexBytes } +// Clone creates a deep copy of this commit. +func (commit *Commit) Clone() *Commit { + sigs := make([]CommitSig, len(commit.Signatures)) + copy(sigs, commit.Signatures) + commCopy := *commit + commCopy.Signatures = sigs + return &commCopy +} + // GetVote converts the CommitSig for the given valIdx to a Vote. Commits do // not contain vote extensions, so the vote extension and vote extension // signature will not be present in the returned vote. @@ -1010,9 +1024,7 @@ func CommitFromProto(cp *cmtproto.Commit) (*Commit, error) { return nil, errors.New("nil Commit") } - var ( - commit = new(Commit) - ) + commit := new(Commit) bi, err := BlockIDFromProto(&cp.BlockID) if err != nil { @@ -1176,12 +1188,12 @@ func (ec *ExtendedCommit) Size() int { // Implements VoteSetReader. func (ec *ExtendedCommit) BitArray() *bits.BitArray { if ec.bitArray == nil { - ec.bitArray = bits.NewBitArray(len(ec.ExtendedSignatures)) - for i, extCommitSig := range ec.ExtendedSignatures { + initialBitFn := func(i int) bool { // TODO: need to check the BlockID otherwise we could be counting conflicts, // not just the one with +2/3 ! - ec.bitArray.SetIndex(i, extCommitSig.BlockIDFlag != BlockIDFlagAbsent) + return ec.ExtendedSignatures[i].BlockIDFlag != BlockIDFlagAbsent } + ec.bitArray = bits.NewBitArrayFromFn(len(ec.ExtendedSignatures), initialBitFn) } return ec.bitArray } @@ -1278,7 +1290,6 @@ func ExtendedCommitFromProto(ecp *cmtproto.ExtendedCommit) (*ExtendedCommit, err // Data contains the set of transactions included in the block type Data struct { - // Txs that will be applied by state @ block.Height+1. // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. diff --git a/types/block_meta.go b/types/block_meta.go index d66cc8f36cd..9dcefcb34d1 100644 --- a/types/block_meta.go +++ b/types/block_meta.go @@ -41,6 +41,14 @@ func (bm *BlockMeta) ToProto() *cmtproto.BlockMeta { } func BlockMetaFromProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { + bm, err := BlockMetaFromTrustedProto(pb) + if err != nil { + return nil, err + } + return bm, bm.ValidateBasic() +} + +func BlockMetaFromTrustedProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { if pb == nil { return nil, errors.New("blockmeta is empty") } @@ -62,7 +70,7 @@ func BlockMetaFromProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { bm.Header = h bm.NumTxs = int(pb.NumTxs) - return bm, bm.ValidateBasic() + return bm, nil } // ValidateBasic performs basic validation. 
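// A sketch of how the two constructors above are meant to be used (caller
// code is hypothetical): metas read back from our own block store can skip
// re-validation, while metas received from a peer must not:
//
//	bm, err := BlockMetaFromTrustedProto(pb) // local store, no ValidateBasic
//	bm, err := BlockMetaFromProto(pb)        // untrusted input, validated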
diff --git a/types/block_meta_test.go b/types/block_meta_test.go index b557f284a50..269291d2e96 100644 --- a/types/block_meta_test.go +++ b/types/block_meta_test.go @@ -34,7 +34,7 @@ func TestBlockMeta_ToProto(t *testing.T) { t.Run(tt.testName, func(t *testing.T) { pb := tt.bm.ToProto() - bm, err := BlockMetaFromProto(pb) + bm, err := BlockMetaFromTrustedProto(pb) if !tt.expErr { require.NoError(t, err, tt.testName) diff --git a/types/event_bus.go b/types/event_bus.go index 8ed23ab8005..be5aee65c66 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -109,17 +109,15 @@ func (b *EventBus) Publish(eventType string, eventData TMEventData) error { // map of stringified events where each key is composed of the event // type and each of the event's attributes keys in the form of // "{event.Type}.{attribute.Key}" and the value is each attribute's value. -func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.Logger) map[string][]string { +func (*EventBus) validateAndStringifyEvents(events []types.Event) map[string][]string { result := make(map[string][]string) for _, event := range events { if len(event.Type) == 0 { - logger.Debug("Got an event with an empty type (skipping)", "event", event) continue } for _, attr := range event.Attributes { if len(attr.Key) == 0 { - logger.Debug("Got an event attribute with an empty key(skipping)", "event", event) continue } @@ -134,8 +132,7 @@ func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.L func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { // no explicit deadline for publishing events ctx := context.Background() - - events := b.validateAndStringifyEvents(data.ResultFinalizeBlock.Events, b.Logger.With("height", data.Block.Height)) + events := b.validateAndStringifyEvents(data.ResultFinalizeBlock.Events) // add predefined new block event events[EventTypeKey] = append(events[EventTypeKey], EventNewBlock) @@ -147,7 +144,7 @@ func (b *EventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) erro // no explicit deadline for publishing events ctx := context.Background() - events := b.validateAndStringifyEvents(data.Events, b.Logger.With("height", data.Height)) + events := b.validateAndStringifyEvents(data.Events) // add predefined new block event events[EventTypeKey] = append(events[EventTypeKey], EventNewBlockEvents) @@ -178,7 +175,7 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() - events := b.validateAndStringifyEvents(data.Result.Events, b.Logger.With("tx", data.Tx)) + events := b.validateAndStringifyEvents(data.Result.Events) // add predefined compositeKeys events[EventTypeKey] = append(events[EventTypeKey], EventTx) @@ -232,82 +229,82 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd type NopEventBus struct{} func (NopEventBus) Subscribe( - ctx context.Context, - subscriber string, - query cmtpubsub.Query, - out chan<- interface{}, + context.Context, + string, + cmtpubsub.Query, + chan<- interface{}, ) error { return nil } -func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query cmtpubsub.Query) error { +func (NopEventBus) Unsubscribe(context.Context, string, cmtpubsub.Query) error { return nil } -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { +func (NopEventBus) UnsubscribeAll(context.Context, string) error { return nil } -func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error 
{ +func (NopEventBus) PublishEventNewBlock(EventDataNewBlock) error { return nil } -func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { +func (NopEventBus) PublishEventNewBlockHeader(EventDataNewBlockHeader) error { return nil } -func (NopEventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) error { +func (NopEventBus) PublishEventNewBlockEvents(EventDataNewBlockEvents) error { return nil } -func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { +func (NopEventBus) PublishEventNewEvidence(EventDataNewEvidence) error { return nil } -func (NopEventBus) PublishEventVote(data EventDataVote) error { +func (NopEventBus) PublishEventVote(EventDataVote) error { return nil } -func (NopEventBus) PublishEventTx(data EventDataTx) error { +func (NopEventBus) PublishEventTx(EventDataTx) error { return nil } -func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { +func (NopEventBus) PublishEventNewRoundStep(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutPropose(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutWait(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { +func (NopEventBus) PublishEventNewRound(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { +func (NopEventBus) PublishEventCompleteProposal(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { +func (NopEventBus) PublishEventPolka(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { +func (NopEventBus) PublishEventUnlock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { +func (NopEventBus) PublishEventRelock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventLock(data EventDataRoundState) error { +func (NopEventBus) PublishEventLock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { +func (NopEventBus) PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error { return nil } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 3058a80d402..de9e61ed285 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -96,7 +96,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { }() var ps *PartSet - ps, err = block.MakePartSet(MaxBlockSizeBytes) + ps, err = block.MakePartSet(BlockPartSizeBytes) require.NoError(t, err) err = eventBus.PublishEventNewBlock(EventDataNewBlock{ diff --git a/types/light.go b/types/light.go index 31fdc620f94..e3ef1f63db5 100644 --- a/types/light.go +++ b/types/light.go @@ -120,6 +120,11 @@ type SignedHeader struct { Commit *Commit `json:"commit"` } +// IsEmpty returns true if both the header and commit are nil. +func (sh SignedHeader) IsEmpty() bool { + return sh.Header == nil && sh.Commit == nil +} + // ValidateBasic does basic consistency checks and makes sure the header // and commit are consistent. 
//
diff --git a/types/params.go b/types/params.go
index d12503ae0e0..81bfa5aa2bc 100644
--- a/types/params.go
+++ b/types/params.go
@@ -143,8 +143,12 @@ func IsValidPubkeyType(params ValidatorParams, pubkeyType string) bool {
 // Validate validates the ConsensusParams to ensure all values are within their
 // allowed limits, and returns an error if they are not.
 func (params ConsensusParams) ValidateBasic() error {
-	if params.Block.MaxBytes <= 0 {
-		return fmt.Errorf("block.MaxBytes must be greater than 0. Got %d",
+	if params.Block.MaxBytes == 0 {
+		return fmt.Errorf("block.MaxBytes cannot be 0")
+	}
+	if params.Block.MaxBytes < -1 {
+		return fmt.Errorf("block.MaxBytes must be -1 or greater than 0. Got %d",
+
 			params.Block.MaxBytes)
 	}
 	if params.Block.MaxBytes > MaxBlockSizeBytes {
@@ -167,7 +171,11 @@ func (params ConsensusParams) ValidateBasic() error {
 			params.Evidence.MaxAgeDuration)
 	}
 
-	if params.Evidence.MaxBytes > params.Block.MaxBytes {
+	maxBytes := params.Block.MaxBytes
+	if maxBytes == -1 {
+		maxBytes = int64(MaxBlockSizeBytes)
+	}
+	if params.Evidence.MaxBytes > maxBytes {
 		return fmt.Errorf("evidence.MaxBytesEvidence is greater than upper bound, %d > %d",
 			params.Evidence.MaxBytes, params.Block.MaxBytes)
 	}
@@ -197,27 +205,63 @@ func (params ConsensusParams) ValidateBasic() error {
 	return nil
 }
 
+// ValidateUpdate validates the updated VoteExtensionsEnableHeight.
+// | r  | params...EnableHeight | updated...EnableHeight | result (nil == pass)
+// |  1 | *                     | (nil)                  | nil
+// |  2 | *                     | < 0                    | VoteExtensionsEnableHeight cannot be negative
+// |  3 | <=0                   | 0                      | nil
+// |  4 | X                     | X (>=0)                | nil
+// |  5 | > 0; <=height         | 0                      | vote extensions cannot be disabled once enabled
+// |  6 | > 0; > height         | 0                      | nil (disable a previous proposal)
+// |  7 | *                     | <=height               | vote extensions cannot be updated to a past height
+// |  8 | <=0                   | > height (*)           | nil
+// |  9 | (> 0) <=height        | > height (*)           | vote extensions cannot be modified once enabled
+// | 10 | (> 0) > height        | > height (*)           | nil
 func (params ConsensusParams) ValidateUpdate(updated *cmtproto.ConsensusParams, h int64) error {
-	if updated.Abci == nil {
+	// 1
+	if updated == nil || updated.Abci == nil {
+		return nil
+	}
+	// 2
+	if updated.Abci.VoteExtensionsEnableHeight < 0 {
+		return errors.New("VoteExtensionsEnableHeight cannot be negative")
+	}
+	// 3
+	if params.ABCI.VoteExtensionsEnableHeight <= 0 && updated.Abci.VoteExtensionsEnableHeight == 0 {
 		return nil
 	}
+	// 4 (implicit: updated.Abci.VoteExtensionsEnableHeight >= 0)
 	if params.ABCI.VoteExtensionsEnableHeight == updated.Abci.VoteExtensionsEnableHeight {
 		return nil
 	}
-	if params.ABCI.VoteExtensionsEnableHeight != 0 && updated.Abci.VoteExtensionsEnableHeight == 0 {
-		return errors.New("vote extensions cannot be disabled once enabled")
+	// 5 & 6
+	if params.ABCI.VoteExtensionsEnableHeight > 0 && updated.Abci.VoteExtensionsEnableHeight == 0 {
+		// 5
+		if params.ABCI.VoteExtensionsEnableHeight <= h {
+			return fmt.Errorf("vote extensions cannot be disabled once enabled; "+
+				"old enable height: %d, current height %d",
+				params.ABCI.VoteExtensionsEnableHeight, h)
+		}
+		// 6
+		return nil
 	}
+	// 7 (implicit: updated.Abci.VoteExtensionsEnableHeight > 0)
 	if updated.Abci.VoteExtensionsEnableHeight <= h {
-		return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated to a past height, "+
-			"initial height: %d, current height %d",
-			params.ABCI.VoteExtensionsEnableHeight, h)
+		return fmt.Errorf("vote extensions cannot be updated to a past or current height, "+
+			"enable height: %d, current height %d",
+
updated.Abci.VoteExtensionsEnableHeight, h)
+	}
+	// 8 (implicit: updated.Abci.VoteExtensionsEnableHeight > h)
+	if params.ABCI.VoteExtensionsEnableHeight <= 0 {
+		return nil
 	}
+	// 9 (implicit: params.ABCI.VoteExtensionsEnableHeight > 0 && updated.Abci.VoteExtensionsEnableHeight > h)
 	if params.ABCI.VoteExtensionsEnableHeight <= h {
-		return fmt.Errorf("VoteExtensionsEnableHeight cannot be modified once"+
-			"the initial height has occurred, "+
-			"initial height: %d, current height %d",
+		return fmt.Errorf("vote extensions cannot be modified once enabled; "+
+			"enable height: %d, current height %d",
 			params.ABCI.VoteExtensionsEnableHeight, h)
 	}
+	// 10 (implicit: params.ABCI.VoteExtensionsEnableHeight > h && updated.Abci.VoteExtensionsEnableHeight > h)
 	return nil
 }
diff --git a/types/params_test.go b/types/params_test.go
index 4311945d558..f3e758237c9 100644
--- a/types/params_test.go
+++ b/types/params_test.go
@@ -39,6 +39,8 @@ func TestConsensusParamsValidation(t *testing.T) {
 		11: {makeParams(1, 0, 2, 0, []string{}, 0), false},
 		// test invalid pubkey type provided
 		12: {makeParams(1, 0, 2, 0, []string{"potatoes make good pubkeys"}, 0), false},
+		13: {makeParams(-1, 0, 2, 0, valEd25519, 0), true},
+		14: {makeParams(-2, 0, 2, 0, valEd25519, 0), false},
 	}
 	for i, tc := range testCases {
 		if tc.valid {
@@ -152,63 +154,78 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) {
 }
 
 func TestConsensusParamsUpdate_VoteExtensionsEnableHeight(t *testing.T) {
-	t.Run("set to height but initial height already run", func(*testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 1)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 10,
-			},
-		}
-		require.Error(t, initialParams.ValidateUpdate(update, 1))
-		require.Error(t, initialParams.ValidateUpdate(update, 5))
-	})
-	t.Run("reset to 0", func(t *testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 1)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 0,
-			},
-		}
-		require.Error(t, initialParams.ValidateUpdate(update, 1))
-	})
-	t.Run("set to height before current height run", func(*testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 100)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 10,
-			},
-		}
-		require.Error(t, initialParams.ValidateUpdate(update, 11))
-		require.Error(t, initialParams.ValidateUpdate(update, 99))
-	})
-	t.Run("set to height after current height run", func(*testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 300)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 99,
-			},
-		}
-		require.NoError(t, initialParams.ValidateUpdate(update, 11))
-		require.NoError(t, initialParams.ValidateUpdate(update, 98))
-	})
-	t.Run("no error when unchanged", func(*testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 100)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 100,
-			},
-		}
-		require.NoError(t, initialParams.ValidateUpdate(update, 500))
-	})
-	t.Run("updated from 0 to 0", func(t *testing.T) {
-		initialParams := makeParams(1, 0, 2, 0, valEd25519, 0)
-		update := &cmtproto.ConsensusParams{
-			Abci: &cmtproto.ABCIParams{
-				VoteExtensionsEnableHeight: 0,
-			},
-		}
-		require.NoError(t, initialParams.ValidateUpdate(update, 100))
-	})
+	const nilTest = -10000000
+	testCases := []struct {
+		name        string
+		current     int64
+		from        int64
+		to          int64
+
expectedErr bool + }{ + // no change + {"current: 3, 0 -> 0", 3, 0, 0, false}, + {"current: 3, 100 -> 100, ", 3, 100, 100, false}, + {"current: 100, 100 -> 100, ", 100, 100, 100, false}, + {"current: 300, 100 -> 100, ", 300, 100, 100, false}, + // set for the first time + {"current: 3, 0 -> 5, ", 3, 0, 5, false}, + {"current: 4, 0 -> 5, ", 4, 0, 5, false}, + {"current: 5, 0 -> 5, ", 5, 0, 5, true}, + {"current: 6, 0 -> 5, ", 6, 0, 5, true}, + {"current: 50, 0 -> 5, ", 50, 0, 5, true}, + // reset to 0 + {"current: 4, 5 -> 0, ", 4, 5, 0, false}, + {"current: 5, 5 -> 0, ", 5, 5, 0, true}, + {"current: 6, 5 -> 0, ", 6, 5, 0, true}, + {"current: 10, 5 -> 0, ", 10, 5, 0, true}, + // modify backwards + {"current: 1, 10 -> 5, ", 1, 10, 5, false}, + {"current: 4, 10 -> 5, ", 4, 10, 5, false}, + {"current: 5, 10 -> 5, ", 5, 10, 5, true}, + {"current: 6, 10 -> 5, ", 6, 10, 5, true}, + {"current: 9, 10 -> 5, ", 9, 10, 5, true}, + {"current: 10, 10 -> 5, ", 10, 10, 5, true}, + {"current: 11, 10 -> 5, ", 11, 10, 5, true}, + {"current: 100, 10 -> 5, ", 100, 10, 5, true}, + // modify forward + {"current: 3, 10 -> 15, ", 3, 10, 15, false}, + {"current: 9, 10 -> 15, ", 9, 10, 15, false}, + {"current: 10, 10 -> 15, ", 10, 10, 15, true}, + {"current: 11, 10 -> 15, ", 11, 10, 15, true}, + {"current: 14, 10 -> 15, ", 14, 10, 15, true}, + {"current: 15, 10 -> 15, ", 15, 10, 15, true}, + {"current: 16, 10 -> 15, ", 16, 10, 15, true}, + {"current: 100, 10 -> 15, ", 100, 10, 15, true}, + // negative values + {"current: 3, 0 -> -5", 3, 0, -5, true}, + {"current: 3, -5 -> 100, ", 3, -5, 100, false}, + {"current: 3, -10 -> 3, ", 3, -10, 3, true}, + {"current: 3, -3 -> -3", 3, -3, -3, true}, + {"current: 100, -8 -> -9, ", 100, -8, -9, true}, + {"current: 300, -10 -> -8, ", 300, -10, -8, true}, + // test for nil + {"current: 300, 400 -> nil, ", 300, 400, nilTest, false}, + {"current: 300, 200 -> nil, ", 300, 200, nilTest, false}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(*testing.T) { + initialParams := makeParams(1, 0, 2, 0, valEd25519, tc.from) + update := &cmtproto.ConsensusParams{} + if tc.to == nilTest { + update.Abci = nil + } else { + update.Abci = &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: tc.to, + } + } + if tc.expectedErr { + require.Error(t, initialParams.ValidateUpdate(update, tc.current)) + } else { + require.NoError(t, initialParams.ValidateUpdate(update, tc.current)) + } + }) + } } func TestProto(t *testing.T) { diff --git a/types/part_set.go b/types/part_set.go index cd110bfcec7..87a81f1c6bc 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -18,6 +18,8 @@ import ( var ( ErrPartSetUnexpectedIndex = errors.New("error part set unexpected index") ErrPartSetInvalidProof = errors.New("error part set invalid proof") + ErrPartTooBig = errors.New("error part size too big") + ErrPartInvalidSize = errors.New("error inner part with invalid size") ) type Part struct { @@ -29,7 +31,11 @@ type Part struct { // ValidateBasic performs basic validation. func (part *Part) ValidateBasic() error { if len(part.Bytes) > int(BlockPartSizeBytes) { - return fmt.Errorf("too big: %d bytes, max: %d", len(part.Bytes), BlockPartSizeBytes) + return ErrPartTooBig + } + // All parts except the last one should have the same constant size. 
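+	// For example, with Proof.Total == 3, the parts at indexes 0 and 1 must be
+	// exactly BlockPartSizeBytes (64 kB) long; only the final part (index 2)
+	// may be shorter, carrying the remainder of the block bytes.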
+ if int64(part.Index) < part.Proof.Total-1 && len(part.Bytes) != int(BlockPartSizeBytes) { + return ErrPartInvalidSize } if err := part.Proof.ValidateBasic(); err != nil { return fmt.Errorf("wrong Proof: %w", err) @@ -270,9 +276,12 @@ func (ps *PartSet) Total() uint32 { } func (ps *PartSet) AddPart(part *Part) (bool, error) { + // TODO: remove this? would be preferable if this only returned (false, nil) + // when its a duplicate block part if ps == nil { return false, nil } + ps.mtx.Lock() defer ps.mtx.Unlock() @@ -286,6 +295,11 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { return false, nil } + // The proof should be compatible with the number of parts. + if part.Proof.Total != int64(ps.total) { + return false, ErrPartSetInvalidProof + } + // Check hash proof if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { return false, ErrPartSetInvalidProof diff --git a/types/part_set_test.go b/types/part_set_test.go index c1f885260cd..260618dab1d 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -86,6 +86,22 @@ func TestWrongProof(t *testing.T) { if added || err == nil { t.Errorf("expected to fail adding a part with bad bytes.") } + + // Test adding a part with wrong proof index. + part = partSet.GetPart(2) + part.Proof.Index = 1 + added, err = partSet2.AddPart(part) + if added || err == nil { + t.Errorf("expected to fail adding a part with bad proof index.") + } + + // Test adding a part with wrong proof total. + part = partSet.GetPart(3) + part.Proof.Total = int64(partSet.Total() - 1) + added, err = partSet2.AddPart(part) + if added || err == nil { + t.Errorf("expected to fail adding a part with bad proof total.") + } } func TestPartSetHeaderValidateBasic(t *testing.T) { @@ -117,9 +133,19 @@ func TestPartValidateBasic(t *testing.T) { }{ {"Good Part", func(pt *Part) {}, false}, {"Too big part", func(pt *Part) { pt.Bytes = make([]byte, BlockPartSizeBytes+1) }, true}, + {"Good small last part", func(pt *Part) { + pt.Index = 1 + pt.Bytes = make([]byte, BlockPartSizeBytes-1) + pt.Proof.Total = 2 + }, false}, + {"Too small inner part", func(pt *Part) { + pt.Index = 0 + pt.Bytes = make([]byte, BlockPartSizeBytes-1) + pt.Proof.Total = 2 + }, true}, {"Too big proof", func(pt *Part) { pt.Proof = merkle.Proof{ - Total: 1, + Total: 2, Index: 1, LeafHash: make([]byte, 1024*1024), } diff --git a/types/priv_validator.go b/types/priv_validator.go index b12dd6e6765..340794d00c5 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -142,12 +142,12 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") // Implements PrivValidator. -func (pv *ErroringMockPV) SignVote(chainID string, vote *cmtproto.Vote) error { +func (pv *ErroringMockPV) SignVote(string, *cmtproto.Vote) error { return ErroringMockPVErr } // Implements PrivValidator. 
-func (pv *ErroringMockPV) SignProposal(chainID string, proposal *cmtproto.Proposal) error { +func (pv *ErroringMockPV) SignProposal(string, *cmtproto.Proposal) error { return ErroringMockPVErr } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 375b304d282..73a2c02a16b 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -14,11 +14,11 @@ import ( func TestABCIPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - err := testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519) + err := testABCIPubKey(t, pkEd) assert.NoError(t, err) } -func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { +func testABCIPubKey(t *testing.T, pk crypto.PubKey) error { abciPubKey, err := cryptoenc.PubKeyToProto(pk) require.NoError(t, err) pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) @@ -54,12 +54,12 @@ func TestABCIValidators(t *testing.T) { type pubKeyEddie struct{} -func (pubKeyEddie) Address() Address { return []byte{} } -func (pubKeyEddie) Bytes() []byte { return []byte{} } -func (pubKeyEddie) VerifySignature(msg []byte, sig []byte) bool { return false } -func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } -func (pubKeyEddie) String() string { return "" } -func (pubKeyEddie) Type() string { return "pubKeyEddie" } +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) VerifySignature([]byte, []byte) bool { return false } +func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) String() string { return "" } +func (pubKeyEddie) Type() string { return "pubKeyEddie" } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go index e8daccbb1b3..bea28a3d7ec 100644 --- a/types/signed_msg_type.go +++ b/types/signed_msg_type.go @@ -11,3 +11,18 @@ func IsVoteTypeValid(t cmtproto.SignedMsgType) bool { return false } } + +var signedMsgTypeToShortName = map[cmtproto.SignedMsgType]string{ + cmtproto.UnknownType: "unknown", + cmtproto.PrevoteType: "prevote", + cmtproto.PrecommitType: "precommit", + cmtproto.ProposalType: "proposal", +} + +// Returns a short lowercase descriptor for a signed message type. +func SignedMsgTypeToShortString(t cmtproto.SignedMsgType) string { + if shortName, ok := signedMsgTypeToShortName[t]; ok { + return shortName + } + return "unknown" +} diff --git a/types/tx.go b/types/tx.go index 0ba14dbadd0..5cbb2cc40df 100644 --- a/types/tx.go +++ b/types/tx.go @@ -107,7 +107,7 @@ func ToTxs(txl [][]byte) Txs { func (txs Txs) Validate(maxSizeBytes int64) error { var size int64 for _, tx := range txs { - size += int64(len(tx)) + size += ComputeProtoSizeForTxs([]Tx{tx}) if size > maxSizeBytes { return fmt.Errorf("transaction data size exceeds maximum %d", maxSizeBytes) } diff --git a/types/validation.go b/types/validation.go index ad3a13690c5..77d9e47c021 100644 --- a/types/validation.go +++ b/types/validation.go @@ -12,7 +12,9 @@ import ( const batchVerifyThreshold = 2 func shouldBatchVerify(vals *ValidatorSet, commit *Commit) bool { - return len(commit.Signatures) >= batchVerifyThreshold && batch.SupportsBatchVerifier(vals.GetProposer().PubKey) + return len(commit.Signatures) >= batchVerifyThreshold && + batch.SupportsBatchVerifier(vals.GetProposer().PubKey) && + vals.AllKeysHaveSameType() } // VerifyCommit verifies +2/3 of the set had signed the given commit. 
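Aside on the shouldBatchVerify change above: the batch verifiers in crypto/batch are scheme-specific, so a commit signed by a validator set that mixes key types cannot be verified in a single batch, and verification must fall back to the one-by-one path. A minimal sketch of the observable behavior, assuming the AllKeysHaveSameType accessor added later in this diff (the construction itself is illustrative, not part of the change):

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/ed25519"
	"github.com/cometbft/cometbft/crypto/sr25519"
	"github.com/cometbft/cometbft/types"
)

func main() {
	// Homogeneous ed25519 set: still eligible for batch verification.
	same := types.NewValidatorSet([]*types.Validator{
		types.NewValidator(ed25519.GenPrivKey().PubKey(), 10),
		types.NewValidator(ed25519.GenPrivKey().PubKey(), 10),
	})
	// Mixed ed25519/sr25519 set: shouldBatchVerify now rejects it and
	// signatures are checked individually.
	mixed := types.NewValidatorSet([]*types.Validator{
		types.NewValidator(ed25519.GenPrivKey().PubKey(), 10),
		types.NewValidator(sr25519.GenPrivKey().PubKey(), 10),
	})
	fmt.Println(same.AllKeysHaveSameType())  // true
	fmt.Println(mixed.AllKeysHaveSameType()) // false
}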
@@ -54,10 +56,39 @@ func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID, // VerifyCommitLight verifies +2/3 of the set had signed the given commit. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. -func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, - height int64, commit *Commit) error { +func VerifyCommitLight( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, +) error { + return verifyCommitLightInternal(chainID, vals, blockID, height, commit, false) +} + +// VerifyCommitLightAllSignatures verifies +2/3 of the set had signed the given commit. +// +// This method DOES check all the signatures. +func VerifyCommitLightAllSignatures( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, +) error { + return verifyCommitLightInternal(chainID, vals, blockID, height, commit, true) +} + +func verifyCommitLightInternal( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, + countAllSignatures bool, +) error { // run a basic validation of the arguments if err := verifyBasicValsAndCommit(vals, commit, height, blockID); err != nil { return err @@ -75,12 +106,12 @@ func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, // attempt to batch verify if shouldBatchVerify(vals, commit) { return verifyCommitBatch(chainID, vals, commit, - votingPowerNeeded, ignore, count, false, true) + votingPowerNeeded, ignore, count, countAllSignatures, true) } // if verification failed or is not supported then fallback to single verification return verifyCommitSingle(chainID, vals, commit, votingPowerNeeded, - ignore, count, false, true) + ignore, count, countAllSignatures, true) } // VerifyCommitLightTrusting verifies that trustLevel of the validator set signed @@ -89,9 +120,40 @@ func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, // NOTE the given validators do not necessarily correspond to the validator set // for this commit, but there may be some intersection. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. -func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commit, trustLevel cmtmath.Fraction) error { +func VerifyCommitLightTrusting( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return verifyCommitLightTrustingInternal(chainID, vals, commit, trustLevel, false) +} + +// VerifyCommitLightTrustingAllSignatures verifies that trustLevel of the validator +// set signed this commit. +// +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. +// +// This method DOES check all the signatures. 
+func VerifyCommitLightTrustingAllSignatures( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return verifyCommitLightTrustingInternal(chainID, vals, commit, trustLevel, true) +} + +func verifyCommitLightTrustingInternal( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, + countAllSignatures bool, +) error { // sanity checks if vals == nil { return errors.New("nil validator set") @@ -121,12 +183,12 @@ func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commi // up by address rather than index. if shouldBatchVerify(vals, commit) { return verifyCommitBatch(chainID, vals, commit, - votingPowerNeeded, ignore, count, false, false) + votingPowerNeeded, ignore, count, countAllSignatures, false) } // attempt with single verification return verifyCommitSingle(chainID, vals, commit, votingPowerNeeded, - ignore, count, false, false) + ignore, count, countAllSignatures, false) } // ValidateHash returns an error if the hash is not empty, but its @@ -284,7 +346,11 @@ func verifyCommitSingle( continue } - // If the vals and commit have a 1-to-1 correspondance we can retrieve + if commitSig.ValidateBasic() != nil { + return fmt.Errorf("invalid signatures from %v at index %d", val, idx) + } + + // If the vals and commit have a 1-to-1 correspondence we can retrieve // them by index else we need to retrieve them by address if lookUpByIndex { val = vals.Validators[idx] @@ -306,6 +372,10 @@ func verifyCommitSingle( seenVals[valIdx] = idx } + if val.PubKey == nil { + return fmt.Errorf("validator %v has a nil PubKey at index %d", val, idx) + } + voteSignBytes = commit.VoteSignBytes(chainID, int32(idx)) if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { diff --git a/types/validation_test.go b/types/validation_test.go index a6cdf818c3e..b44c63643cd 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -1,6 +1,7 @@ package types import ( + "strconv" "testing" "time" @@ -24,7 +25,7 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { ) testCases := []struct { - description string + description, description2 string // description2, if not empty, is checked against VerifyCommitLightTrusting // vote chainID chainID string // vote blockID @@ -41,24 +42,26 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { expErr bool }{ - {"good (batch verification)", chainID, blockID, 3, height, 3, 0, 0, false}, - {"good (single verification)", chainID, blockID, 1, height, 1, 0, 0, false}, + {"good (batch verification)", "", chainID, blockID, 3, height, 3, 0, 0, false}, + {"good (single verification)", "", chainID, blockID, 1, height, 1, 0, 0, false}, - {"wrong signature (#0)", "EpsilonEridani", blockID, 2, height, 2, 0, 0, true}, - {"wrong block ID", chainID, makeBlockIDRandom(), 2, height, 2, 0, 0, true}, - {"wrong height", chainID, blockID, 1, height - 1, 1, 0, 0, true}, + {"wrong signature (#0)", "", "EpsilonEridani", blockID, 2, height, 2, 0, 0, true}, + {"wrong block ID", "", chainID, makeBlockIDRandom(), 2, height, 2, 0, 0, true}, + {"wrong height", "", chainID, blockID, 1, height - 1, 1, 0, 0, true}, - {"wrong set size: 4 vs 3", chainID, blockID, 4, height, 3, 0, 0, true}, - {"wrong set size: 1 vs 2", chainID, blockID, 1, height, 2, 0, 0, true}, + {"wrong set size: 4 vs 3", "", chainID, blockID, 4, height, 3, 0, 0, true}, + {"wrong set size: 1 vs 2", "double vote from Validator", chainID, blockID, 1, height, 2, 0, 0, true}, - {"insufficient voting power: got 30, 
needed more than 66", chainID, blockID, 10, height, 3, 2, 5, true}, - {"insufficient voting power: got 0, needed more than 6", chainID, blockID, 1, height, 0, 0, 1, true}, - {"insufficient voting power: got 60, needed more than 60", chainID, blockID, 9, height, 6, 3, 0, true}, + {"insufficient voting power: got 30, needed more than 66", "", chainID, blockID, 10, height, 3, 2, 5, true}, + {"insufficient voting power: got 0, needed more than 6", "", chainID, blockID, 1, height, 0, 0, 1, true}, // absent + {"insufficient voting power: got 0, needed more than 6", "", chainID, blockID, 1, height, 0, 1, 0, true}, // nil + {"insufficient voting power: got 60, needed more than 60", "", chainID, blockID, 9, height, 6, 3, 0, true}, } for _, tc := range testCases { tc := tc - t.Run(tc.description, func(t *testing.T) { + countAllSignatures := false + f := func(t *testing.T) { _, valSet, vals := randVoteSet(tc.height, round, cmtproto.PrecommitType, tc.valSize, 10, false) totalVotes := tc.blockVotes + tc.absentVotes + tc.nilVotes sigs := make([]CommitSig, totalVotes) @@ -110,7 +113,11 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { assert.NoError(t, err, "VerifyCommit") } - err = valSet.VerifyCommitLight(chainID, blockID, height, commit) + if countAllSignatures { + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, height, commit) + } else { + err = valSet.VerifyCommitLight(chainID, blockID, height, commit) + } if tc.expErr { if assert.Error(t, err, "VerifyCommitLight") { assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight") @@ -120,18 +127,30 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { } // only a subsection of the tests apply to VerifyCommitLightTrusting - if totalVotes != tc.valSize || !tc.blockID.Equals(blockID) || tc.height != height { - tc.expErr = false + expErr := tc.expErr + if (!countAllSignatures && totalVotes != tc.valSize) || totalVotes < tc.valSize || !tc.blockID.Equals(blockID) || tc.height != height { + expErr = false } - err = valSet.VerifyCommitLightTrusting(chainID, commit, trustLevel) - if tc.expErr { + if countAllSignatures { + err = valSet.VerifyCommitLightTrustingAllSignatures(chainID, commit, trustLevel) + } else { + err = valSet.VerifyCommitLightTrusting(chainID, commit, trustLevel) + } + if expErr { if assert.Error(t, err, "VerifyCommitLightTrusting") { - assert.Contains(t, err.Error(), tc.description, "VerifyCommitLightTrusting") + errStr := tc.description2 + if len(errStr) == 0 { + errStr = tc.description + } + assert.Contains(t, err.Error(), errStr, "VerifyCommitLightTrusting") } } else { assert.NoError(t, err, "VerifyCommitLightTrusting") } - }) + } + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) + countAllSignatures = true + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) } } @@ -163,7 +182,7 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { } } -func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajOfVotingPowerSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) @@ -176,6 +195,9 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + assert.NoError(t, err) + // malleate 4th signature (3 signatures are enough for 2/3+) vote := 
voteSet.GetByIndex(3) v := vote.ToProto() @@ -187,9 +209,11 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign err = valSet.VerifyCommitLight(chainID, blockID, h, commit) assert.NoError(t, err) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + assert.Error(t, err) // counting all signatures detects the malleated signature } -func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) @@ -202,6 +226,13 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + assert.NoError(t, err) + // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) vote := voteSet.GetByIndex(2) v := vote.ToProto() @@ -213,6 +244,12 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin err = valSet.VerifyCommitLightTrusting(chainID, commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}) assert.NoError(t, err) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + assert.Error(t, err) // counting all signatures detects the malleated signature } func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { diff --git a/types/validator.go b/types/validator.go index 886b32756d8..3e95c467bc7 100644 --- a/types/validator.go +++ b/types/validator.go @@ -46,8 +46,9 @@ func (v *Validator) ValidateBasic() error { return errors.New("validator has negative voting power") } - if len(v.Address) != crypto.AddressSize { - return fmt.Errorf("validator address is the wrong size: %v", v.Address) + addr := v.PubKey.Address() + if !bytes.Equal(v.Address, addr) { + return fmt.Errorf("validator address is incorrectly derived from pubkey. Exp: %v, got %v", addr, v.Address) } return nil diff --git a/types/validator_set.go b/types/validator_set.go index 4b509d605a1..62b53dc8945 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "encoding/binary" "errors" "fmt" "math" @@ -10,6 +11,7 @@ import ( "strings" "github.com/cometbft/cometbft/crypto/merkle" + "github.com/cometbft/cometbft/crypto/tmhash" cmtmath "github.com/cometbft/cometbft/libs/math" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) @@ -35,6 +37,9 @@ const ( var ErrTotalVotingPowerOverflow = fmt.Errorf("total voting power of resulting valset exceeds max %d", MaxTotalVotingPower) +// ErrProposerNotInVals is returned if the proposer is not in the validator set. +var ErrProposerNotInVals = errors.New("proposer not in validator set") + // ValidatorSet represent a set of *Validator at a given height. // // The validators can be fetched by address or index. @@ -55,6 +60,8 @@ type ValidatorSet struct { // cached (unexported) totalVotingPower int64 + // true if all validators have the same type of public key or if the set is empty. + allKeysHaveSameType bool } // NewValidatorSet initializes a ValidatorSet by copying over the values from @@ -68,7 +75,9 @@ type ValidatorSet struct { // MaxVotesCount - commits by a validator set larger than this will fail // validation. 
func NewValidatorSet(valz []*Validator) *ValidatorSet {
- vals := &ValidatorSet{}
+ vals := &ValidatorSet{
+ allKeysHaveSameType: true,
+ }
 err := vals.updateWithChangeSet(valz, false)
 if err != nil {
 panic(fmt.Sprintf("Cannot create validator set: %v", err))
@@ -94,7 +103,13 @@ func (vals *ValidatorSet) ValidateBasic() error {
 return fmt.Errorf("proposer failed validate basic, error: %w", err)
 }
- return nil
+ for _, val := range vals.Validators {
+ if bytes.Equal(val.Address, vals.Proposer.Address) {
+ return nil
+ }
+ }
+
+ return ErrProposerNotInVals
 }
 // IsNilOrEmpty returns true if validator set is nil or empty.
@@ -105,9 +120,9 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool {
 // CopyIncrementProposerPriority increments ProposerPriority and updates the
 // proposer on a copy, and returns it.
 func (vals *ValidatorSet) CopyIncrementProposerPriority(times int32) *ValidatorSet {
- copy := vals.Copy()
- copy.IncrementProposerPriority(times)
- return copy
+ cp := vals.Copy()
+ cp.IncrementProposerPriority(times)
+ return cp
 }
 // IncrementProposerPriority increments ProposerPriority of each validator and
@@ -248,9 +263,10 @@ func validatorListCopy(valsList []*Validator) []*Validator {
 // Copy each validator into a new ValidatorSet.
 func (vals *ValidatorSet) Copy() *ValidatorSet {
 return &ValidatorSet{
- Validators: validatorListCopy(vals.Validators),
- Proposer: vals.Proposer,
- totalVotingPower: vals.totalVotingPower,
+ Validators: validatorListCopy(vals.Validators),
+ Proposer: vals.Proposer,
+ totalVotingPower: vals.totalVotingPower,
+ allKeysHaveSameType: vals.allKeysHaveSameType,
 }
 }
@@ -344,6 +360,8 @@ func (vals *ValidatorSet) findProposer() *Validator {
 // Hash returns the Merkle root hash built using validators (as leaves) in the
 // set.
+//
+// See merkle.HashFromByteSlices.
 func (vals *ValidatorSet) Hash() []byte {
 bzs := make([][]byte, len(vals.Validators))
 for i, val := range vals.Validators {
@@ -352,6 +370,23 @@ func (vals *ValidatorSet) Hash() []byte {
 return merkle.HashFromByteSlices(bzs)
 }
+// ProposerPriorityHash returns the tmhash of the varint-encoded proposer priorities.
+// The validator set must be sorted for the hash to be deterministic.
+// If the validator set is empty, nil is returned.
+func (vals *ValidatorSet) ProposerPriorityHash() []byte {
+ if len(vals.Validators) == 0 {
+ return nil
+ }
+
+ buf := make([]byte, binary.MaxVarintLen64*len(vals.Validators))
+ total := 0
+ for _, val := range vals.Validators {
+ n := binary.PutVarint(buf[total:], val.ProposerPriority)
+ total += n
+ }
+ return tmhash.Sum(buf[:total])
+}
+
 // Iterate will run the given function over the set.
 func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
 for i, val := range vals.Validators {
@@ -429,7 +464,6 @@ func verifyUpdates(
 vals *ValidatorSet,
 removedPower int64,
 ) (tvpAfterUpdatesBeforeRemovals int64, err error) {
-
 delta := func(update *Validator, vals *ValidatorSet) int64 {
 _, val := vals.GetByAddress(update.Address)
 if val != nil {
@@ -493,7 +527,6 @@ func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotal
 valUpdate.ProposerPriority = val.ProposerPriority
 }
 }
-
 }
 // Merges the vals' validator list with the updates list.
@@ -629,6 +662,9 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes
 vals.applyUpdates(updates)
 vals.applyRemovals(deletes)
+ // Should go after additions.
+ vals.checkAllKeysHaveSameType()
+
 vals.updateTotalVotingPower() // will panic if total voting power > MaxTotalVotingPower
 // Scale and center.
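For clarity on the ProposerPriorityHash encoding above: each priority is appended as a signed varint at the current buffer offset, and only the written prefix is hashed. A self-contained sketch using the standard library, with plain SHA-256 standing in for tmhash.Sum (an assumption for illustration; upstream tmhash is SHA-256 based):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// priorityHash mirrors the encoding used by ProposerPriorityHash:
// varint-encode each priority at the running offset, then hash the
// used prefix. Writing at buf[total:] is essential; encoding at
// buf[0] on every iteration would overwrite the previous value.
func priorityHash(priorities []int64) []byte {
	if len(priorities) == 0 {
		return nil
	}
	buf := make([]byte, binary.MaxVarintLen64*len(priorities))
	total := 0
	for _, p := range priorities {
		total += binary.PutVarint(buf[total:], p)
	}
	sum := sha256.Sum256(buf[:total])
	return sum[:]
}

func main() {
	fmt.Printf("%x\n", priorityHash([]int64{3, -1, 10}))
}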
@@ -660,24 +696,51 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
 // VerifyCommit verifies +2/3 of the set had signed the given commit and all
 // other signatures are valid
 func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID,
- height int64, commit *Commit) error {
+ height int64, commit *Commit,
+) error {
 return VerifyCommit(chainID, vals, blockID, height, commit)
 }
 // LIGHT CLIENT VERIFICATION METHODS
 // VerifyCommitLight verifies +2/3 of the set had signed the given commit.
+// It does NOT count all signatures.
 func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID,
- height int64, commit *Commit) error {
+ height int64, commit *Commit,
+) error {
 return VerifyCommitLight(chainID, vals, blockID, height, commit)
 }
+// VerifyCommitLightAllSignatures verifies +2/3 of the set had signed the given commit.
+// It DOES count all signatures.
+func (vals *ValidatorSet) VerifyCommitLightAllSignatures(chainID string, blockID BlockID,
+ height int64, commit *Commit,
+) error {
+ return VerifyCommitLightAllSignatures(chainID, vals, blockID, height, commit)
+}
+
 // VerifyCommitLightTrusting verifies that trustLevel of the validator set signed
 // this commit.
-func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Commit, trustLevel cmtmath.Fraction) error {
+// It does NOT count all signatures.
+func (vals *ValidatorSet) VerifyCommitLightTrusting(
+ chainID string,
+ commit *Commit,
+ trustLevel cmtmath.Fraction,
+) error {
 return VerifyCommitLightTrusting(chainID, vals, commit, trustLevel)
 }
+// VerifyCommitLightTrustingAllSignatures verifies that trustLevel of the validator
+// set signed this commit.
+// It DOES count all signatures.
+func (vals *ValidatorSet) VerifyCommitLightTrustingAllSignatures(
+ chainID string,
+ commit *Commit,
+ trustLevel cmtmath.Fraction,
+) error {
+ return VerifyCommitLightTrustingAllSignatures(chainID, vals, commit, trustLevel)
+}
+
 // findPreviousProposer reverses the compare proposer priority function to find the validator
 // with the lowest proposer priority which would have been the previous proposer.
 //
@@ -696,7 +759,37 @@ func (vals *ValidatorSet) findPreviousProposer() *Validator {
 return previousProposer
 }
-//-----------------
+func (vals *ValidatorSet) checkAllKeysHaveSameType() {
+ if vals.Size() == 0 {
+ vals.allKeysHaveSameType = true
+ return
+ }
+
+ firstKeyType := ""
+ for _, val := range vals.Validators {
+ // XXX: A nil PubKey should only occur in tests; skip it so the
+ // type comparison below cannot dereference a nil key.
+ if val.PubKey == nil {
+ continue
+ }
+ if firstKeyType == "" {
+ firstKeyType = val.PubKey.Type()
+ }
+ if val.PubKey.Type() != firstKeyType {
+ vals.allKeysHaveSameType = false
+ return
+ }
+ }
+
+ vals.allKeysHaveSameType = true
+}
+
+// AllKeysHaveSameType returns true if all validators have the same type of
+// public key or if the set is empty.
+func (vals *ValidatorSet) AllKeysHaveSameType() bool {
+ return vals.allKeysHaveSameType
+}
+
+// -----------------
 // IsErrNotEnoughVotingPowerSigned returns true if err is
 // ErrNotEnoughVotingPowerSigned.
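The light-client entry points now come in pairs that differ only in the countAllSignatures flag threaded through the internal helpers: the fast path stops as soon as the required voting power is reached, while the AllSignatures variants keep verifying past the quorum. A hedged usage sketch, with valSet, chainID, blockID, height, and commit assumed to be in scope:

// Fast path: returns once +2/3 of the voting power has verified, so a
// bad signature beyond the quorum can go unnoticed.
if err := valSet.VerifyCommitLight(chainID, blockID, height, commit); err != nil {
	// reject commit
}

// Strict path: verifies every counted signature, so a malleated
// signature past the quorum is detected (see the tests further below).
if err := valSet.VerifyCommitLightAllSignatures(chainID, blockID, height, commit); err != nil {
	// reject commit
}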
@@ -745,7 +838,6 @@ func (vals *ValidatorSet) StringIndented(indent string) string { indent, indent, strings.Join(valStrings, "\n"+indent+" "), indent) - } //------------------------------------- @@ -829,6 +921,7 @@ func ValidatorSetFromProto(vp *cmtproto.ValidatorSet) (*ValidatorSet, error) { valsProto[i] = v } vals.Validators = valsProto + vals.checkAllKeysHaveSameType() p, err := ValidatorFromProto(vp.GetProposer()) if err != nil { @@ -865,6 +958,7 @@ func ValidatorSetFromExistingValidators(valz []*Validator) (*ValidatorSet, error vals := &ValidatorSet{ Validators: valz, } + vals.checkAllKeysHaveSameType() vals.Proposer = vals.findPreviousProposer() vals.updateTotalVotingPower() sort.Sort(ValidatorsByVotingPower(vals.Validators)) diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 11f1ab24834..2a281f12198 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -14,6 +14,7 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/sr25519" cmtmath "github.com/cometbft/cometbft/libs/math" cmtrand "github.com/cometbft/cometbft/libs/rand" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" @@ -45,9 +46,11 @@ func TestValidatorSetBasic(t *testing.T) { assert.Zero(t, vset.Size()) assert.Equal(t, int64(0), vset.TotalVotingPower()) assert.Nil(t, vset.GetProposer()) - assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, + assert.Equal(t, []byte{ + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, - 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash()) + 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, + }, vset.Hash()) // add val = randValidator(vset.TotalVotingPower()) assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) @@ -74,12 +77,12 @@ func TestValidatorSetBasic(t *testing.T) { assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) assert.Equal(t, proposerPriority, val.ProposerPriority) - } -func TestValidatorSetValidateBasic(t *testing.T) { +func TestValidatorSet_ValidateBasic(t *testing.T) { val, _ := RandValidator(false, 1) badVal := &Validator{} + val2, _ := RandValidator(false, 1) testCases := []struct { vals ValidatorSet @@ -120,6 +123,14 @@ func TestValidatorSetValidateBasic(t *testing.T) { err: false, msg: "", }, + { + vals: ValidatorSet{ + Validators: []*Validator{val}, + Proposer: val2, + }, + err: true, + msg: ErrProposerNotInVals.Error(), + }, } for _, tc := range testCases { @@ -132,7 +143,6 @@ func TestValidatorSetValidateBasic(t *testing.T) { assert.NoError(t, err) } } - } func TestCopy(t *testing.T) { @@ -150,6 +160,30 @@ func TestCopy(t *testing.T) { } } +func TestValidatorSet_ProposerPriorityHash(t *testing.T) { + vset := NewValidatorSet(nil) + assert.Equal(t, []byte(nil), vset.ProposerPriorityHash()) + + vset = randValidatorSet(3) + assert.NotNil(t, vset.ProposerPriorityHash()) + + // Marshaling and unmarshalling do not affect ProposerPriorityHash + bz, err := vset.ToProto() + assert.NoError(t, err) + vsetProto, err := ValidatorSetFromProto(bz) + assert.NoError(t, err) + assert.Equal(t, vset.ProposerPriorityHash(), vsetProto.ProposerPriorityHash()) + + // Copy does not affect ProposerPriorityHash + vsetCopy := vset.Copy() + assert.Equal(t, vset.ProposerPriorityHash(), vsetCopy.ProposerPriorityHash()) + + // Incrementing priorities changes 
ProposerPriorityHash() but not Hash() + vset.IncrementProposerPriority(1) + assert.Equal(t, vset.Hash(), vsetCopy.Hash()) + assert.NotEqual(t, vset.ProposerPriorityHash(), vsetCopy.ProposerPriorityHash()) +} + // Test that IncrementProposerPriority requires positive times. func TestIncrementProposerPriorityPositiveTimes(t *testing.T) { vset := NewValidatorSet([]*Validator{ @@ -299,18 +333,22 @@ func TestProposerSelection2(t *testing.T) { } func TestProposerSelection3(t *testing.T) { - vset := NewValidatorSet([]*Validator{ + vals := []*Validator{ newValidator([]byte("avalidator_address12"), 1), newValidator([]byte("bvalidator_address12"), 1), newValidator([]byte("cvalidator_address12"), 1), newValidator([]byte("dvalidator_address12"), 1), - }) + } - proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { - // need to give all validators to have keys pk := ed25519.GenPrivKey().PubKey() - vset.Validators[i].PubKey = pk + vals[i].PubKey = pk + vals[i].Address = pk.Address() + } + sort.Sort(ValidatorsByAddress(vals)) + vset := NewValidatorSet(vals) + proposerOrder := make([]*Validator, 4) + for i := 0; i < 4; i++ { proposerOrder[i] = vset.GetProposer() vset.IncrementProposerPriority(1) } @@ -326,7 +364,7 @@ func TestProposerSelection3(t *testing.T) { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address if !bytes.Equal(got, expected) { - t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) + t.Fatalf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j) } // serialize, deserialize, check proposer @@ -337,13 +375,11 @@ func TestProposerSelection3(t *testing.T) { if i != 0 { if !bytes.Equal(got, computed.Address) { t.Fatalf( - fmt.Sprintf( - "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", - got, - computed.Address, - i, - j, - ), + "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", + got, + computed.Address, + i, + j, ) } } @@ -478,28 +514,39 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) { times int32 avg int64 }{ - 0: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 1}, - {Address: []byte("b"), ProposerPriority: 2}, - {Address: []byte("c"), ProposerPriority: 3}}}, - 1, 2}, - 1: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 10}, - {Address: []byte("b"), ProposerPriority: -10}, - {Address: []byte("c"), ProposerPriority: 1}}}, + 0: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 1}, + {Address: []byte("b"), ProposerPriority: 2}, + {Address: []byte("c"), ProposerPriority: 3}, + }, + }, + 1, 2, + }, + 1: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 10}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}, + }, + }, // this should average twice but the average should be 0 after the first iteration // (voting power is 0 -> no changes) 11, 0, // 1 / 3 }, - 2: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 100}, - {Address: []byte("b"), ProposerPriority: -10}, - {Address: []byte("c"), ProposerPriority: 1}}}, - 1, 91 / 3}, + 2: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 100}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}, + }, + }, + 1, 91 / 3, + }, } for i, tc := range tcs { // 
work on copy to have the old ProposerPriorities: @@ -523,103 +570,125 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { vals := ValidatorSet{Validators: []*Validator{ {Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0}, {Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1}, - {Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}} + {Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}, + }} tcs := []struct { vals *ValidatorSet wantProposerPrioritys []int64 times int32 wantProposer *Validator }{ - 0: { vals.Copy(), []int64{ // Acumm+VotingPower-Avg: 0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12) 0 + vp1, - 0 + vp2}, + 0 + vp2, + }, 1, - vals.Validators[0]}, + vals.Validators[0], + }, 1: { vals.Copy(), []int64{ (0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too (0 + vp1) + vp1, - (0 + vp2) + vp2}, + (0 + vp2) + vp2, + }, 2, - vals.Validators[0]}, // increment twice -> expect average to be subtracted twice + vals.Validators[0], + }, // increment twice -> expect average to be subtracted twice 2: { vals.Copy(), []int64{ 0 + 3*(vp0-total) - avg, // still mostest 0 + 3*vp1, - 0 + 3*vp2}, + 0 + 3*vp2, + }, 3, - vals.Validators[0]}, + vals.Validators[0], + }, 3: { vals.Copy(), []int64{ 0 + 4*(vp0-total), // still mostest 0 + 4*vp1, - 0 + 4*vp2}, + 0 + 4*vp2, + }, 4, - vals.Validators[0]}, + vals.Validators[0], + }, 4: { vals.Copy(), []int64{ 0 + 4*(vp0-total) + vp0, // 4 iters was mostest 0 + 5*vp1 - total, // now this val is mostest for the 1st time (hence -12==totalVotingPower) - 0 + 5*vp2}, + 0 + 5*vp2, + }, 5, - vals.Validators[1]}, + vals.Validators[1], + }, 5: { vals.Copy(), []int64{ 0 + 6*vp0 - 5*total, // mostest again 0 + 6*vp1 - total, // mostest once up to here - 0 + 6*vp2}, + 0 + 6*vp2, + }, 6, - vals.Validators[0]}, + vals.Validators[0], + }, 6: { vals.Copy(), []int64{ 0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times 0 + 7*vp1 - total, // in 7 iters this val is mostest 1 time - 0 + 7*vp2}, + 0 + 7*vp2, + }, 7, - vals.Validators[0]}, + vals.Validators[0], + }, 7: { vals.Copy(), []int64{ 0 + 8*vp0 - 7*total, // mostest again 0 + 8*vp1 - total, - 0 + 8*vp2}, + 0 + 8*vp2, + }, 8, - vals.Validators[0]}, + vals.Validators[0], + }, 8: { vals.Copy(), []int64{ 0 + 9*vp0 - 7*total, 0 + 9*vp1 - total, - 0 + 9*vp2 - total}, // mostest + 0 + 9*vp2 - total, + }, // mostest 9, - vals.Validators[2]}, + vals.Validators[2], + }, 9: { vals.Copy(), []int64{ 0 + 10*vp0 - 8*total, // after 10 iters this is mostest again 0 + 10*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 10*vp2 - total}, // in between 10 iters this val is "mostest" once + 0 + 10*vp2 - total, + }, // in between 10 iters this val is "mostest" once 10, - vals.Validators[0]}, + vals.Validators[0], + }, 10: { vals.Copy(), []int64{ 0 + 11*vp0 - 9*total, - 0 + 11*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once + 0 + 11*vp1 - total, // after 6 iters this val is "mostest" once and not in between + 0 + 11*vp2 - total, + }, // after 10 iters this val is "mostest" once 11, - vals.Validators[0]}, + vals.Validators[0], + }, } for i, tc := range tcs { tc.vals.IncrementProposerPriority(tc.times) @@ -665,7 +734,6 @@ func TestSafeSubClip(t *testing.T) { //------------------------------------------------------------------- func TestEmptySet(t *testing.T) { - var valList []*Validator valSet := 
NewValidatorSet(valList) assert.Panics(t, func() { valSet.IncrementProposerPriority(1) }) @@ -689,11 +757,9 @@ func TestEmptySet(t *testing.T) { // Attempt delete from empty set assert.Error(t, valSet.UpdateWithChangeSet(delList)) - } func TestUpdatesForNewValidatorSet(t *testing.T) { - v1 := newValidator([]byte("v1"), 100) v2 := newValidator([]byte("v2"), 100) valList := []*Validator{v1, v2} @@ -720,7 +786,6 @@ func TestUpdatesForNewValidatorSet(t *testing.T) { v3 = newValidator([]byte("v3"), 30) valList = []*Validator{v1, v2, v3} assert.Panics(t, func() { NewValidatorSet(valList) }) - } type testVal struct { @@ -1010,19 +1075,23 @@ func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) { }{ 0: { // order of changes should not matter, the final validator sets should be the same []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}}, - []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}}, + []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}, + }, 1: { // order of additions should not matter []testVal{{"v2", 20}, {"v1", 10}}, - []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}}, + []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}, + }, 2: { // order of removals should not matter []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, - []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}, + }, 3: { // order of mixed operations should not matter []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, - []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}, + }, } for i, tt := range valSetUpdatesOrderTests { @@ -1067,41 +1136,50 @@ func TestValSetApplyUpdatesTestsExecute(t *testing.T) { 0: { // prepend []testVal{{"v4", 44}, {"v5", 55}}, []testVal{{"v1", 11}}, - []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}}, + []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}, + }, 1: { // append []testVal{{"v4", 44}, {"v5", 55}}, []testVal{{"v6", 66}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}, + }, 2: { // insert []testVal{{"v4", 44}, {"v6", 66}}, []testVal{{"v5", 55}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}, + }, 3: { // insert multi []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}}, []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}, + }, // changes 4: { // head []testVal{{"v1", 111}, {"v2", 22}}, []testVal{{"v1", 11}}, - []testVal{{"v1", 11}, {"v2", 22}}}, + []testVal{{"v1", 11}, {"v2", 22}}, + }, 5: { // tail []testVal{{"v1", 11}, {"v2", 222}}, []testVal{{"v2", 22}}, - []testVal{{"v1", 11}, {"v2", 22}}}, + []testVal{{"v1", 11}, {"v2", 22}}, + }, 6: { // middle []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}}, []testVal{{"v2", 22}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, + }, 7: { // multi []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}}, []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, + }, // additions and changes 8: { []testVal{{"v1", 111}, {"v2", 22}}, []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, 
{"v4", 44}}, + }, } for i, tt := range valSetUpdatesBasicTests { @@ -1127,7 +1205,7 @@ type testVSetCfg struct { expErr error } -func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { +func randTestVSetCfg(nBase, nAddMax int) testVSetCfg { if nBase <= 0 || nAddMax < 0 { panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax)) } @@ -1179,7 +1257,6 @@ func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { sort.Sort(testValsByVotingPower(cfg.expectedVals)) return cfg - } func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) { @@ -1225,15 +1302,15 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { // generate a configuration with 100 validators, // randomly select validators for updates and deletes, and // generate 10 new validators to be added - 3: randTestVSetCfg(t, 100, 10), + 3: randTestVSetCfg(100, 10), - 4: randTestVSetCfg(t, 1000, 100), + 4: randTestVSetCfg(1000, 100), - 5: randTestVSetCfg(t, 10, 100), + 5: randTestVSetCfg(10, 100), - 6: randTestVSetCfg(t, 100, 1000), + 6: randTestVSetCfg(100, 1000), - 7: randTestVSetCfg(t, 1000, 1000), + 7: randTestVSetCfg(1000, 1000), } for _, cfg := range testCases { @@ -1325,28 +1402,56 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { { name: "4 no false overflow error messages for adds, updates and deletes", startVals: []testVal{ - {"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4}, - {"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}}, + {"v1", MaxTotalVotingPower / 4}, + {"v2", MaxTotalVotingPower / 4}, + {"v3", MaxTotalVotingPower / 4}, + {"v4", MaxTotalVotingPower / 4}, + }, deletedVals: []testVal{{"v2", 0}}, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}}, + {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}, + }, addedVals: []testVal{{"v5", 3}}, expectedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}}, + {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}, + }, expErr: nil, }, { name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8", startVals: []testVal{ - {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, - {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, + {"v1", 1}, + {"v2", 1}, + {"v3", 1}, + {"v4", 1}, + {"v5", 1}, + {"v6", 1}, + {"v7", 1}, + {"v8", 1}, + {"v9", 1}, + }, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower}, - {"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower}, - {"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}}, + {"v1", MaxTotalVotingPower}, + {"v2", MaxTotalVotingPower}, + {"v3", MaxTotalVotingPower}, + {"v4", MaxTotalVotingPower}, + {"v5", MaxTotalVotingPower}, + {"v6", MaxTotalVotingPower}, + {"v7", MaxTotalVotingPower}, + {"v8", MaxTotalVotingPower}, + {"v9", 8}, + }, expectedVals: []testVal{ - {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, - {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, + {"v1", 1}, + {"v2", 1}, + {"v3", 1}, + {"v4", 1}, + {"v5", 1}, + {"v6", 1}, + {"v7", 1}, + {"v8", 1}, + {"v9", 1}, + }, expErr: ErrTotalVotingPowerOverflow, }, } @@ -1504,3 +1609,70 @@ func BenchmarkUpdates(b *testing.B) { assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList)) } } + +func TestVerifyCommitWithInvalidProposerKey(t *testing.T) { + vs := 
&ValidatorSet{ + Validators: []*Validator{{}, {}}, + } + commit := &Commit{ + Height: 100, + Signatures: []CommitSig{{}, {}}, + } + var bid BlockID + cid := "" + err := vs.VerifyCommit(cid, bid, 100, commit) + assert.Error(t, err) +} + +func TestVerifyCommitSingleWithInvalidSignatures(t *testing.T) { + vs := &ValidatorSet{ + Validators: []*Validator{{}, {}}, + } + commit := &Commit{ + Height: 100, + Signatures: []CommitSig{{}, {}}, + } + cid := "" + votingPowerNeeded := vs.TotalVotingPower() * 2 / 3 + + // ignore all absent signatures + ignore := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagAbsent } + + // only count the signatures that are for the block + count := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagCommit } + + err := verifyCommitSingle(cid, vs, commit, votingPowerNeeded, ignore, count, true, true) + assert.Error(t, err) +} + +func TestValidatorSet_AllKeysHaveSameType(t *testing.T) { + testCases := []struct { + vals *ValidatorSet + sameType bool + }{ + { + vals: NewValidatorSet([]*Validator{}), + sameType: true, + }, + { + vals: randValidatorSet(1), + sameType: true, + }, + { + vals: randValidatorSet(2), + sameType: true, + }, + { + vals: NewValidatorSet([]*Validator{randValidator(100), NewValidator(sr25519.GenPrivKey().PubKey(), 200)}), + sameType: false, + }, + } + + for i, tc := range testCases { + if tc.sameType { + assert.True(t, tc.vals.AllKeysHaveSameType(), "test %d", i) + } else { + assert.False(t, tc.vals.AllKeysHaveSameType(), "test %d", i) + } + } +} diff --git a/types/validator_test.go b/types/validator_test.go index 5eb2ed7bf1c..954e8ec23bb 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -74,7 +75,7 @@ func TestValidatorValidateBasic(t *testing.T) { Address: nil, }, err: true, - msg: "validator address is the wrong size: ", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. Exp: %v, got ", pubKey.Address()), }, { val: &Validator{ @@ -82,7 +83,7 @@ func TestValidatorValidateBasic(t *testing.T) { Address: []byte{'a'}, }, err: true, - msg: "validator address is the wrong size: 61", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. Exp: %v, got 61", pubKey.Address()), }, } diff --git a/types/vote.go b/types/vote.go index 660b538d178..68cf91477f2 100644 --- a/types/vote.go +++ b/types/vote.go @@ -47,6 +47,15 @@ func NewConflictingVoteError(vote1, vote2 *Vote) *ErrVoteConflictingVotes { } } +// The vote extension is only valid for non-nil precommits. +type ErrVoteExtensionInvalid struct { + ExtSignature []byte +} + +func (err *ErrVoteExtensionInvalid) Error() string { + return fmt.Sprintf("extensions must be present IFF vote is a non-nil Precommit; extension signature: %X", err.ExtSignature) +} + // Address is hex bytes. type Address = crypto.Address @@ -396,6 +405,9 @@ func VotesToProto(votes []*Vote) []*cmtproto.Vote { return res } +// SignAndCheckVote signs the vote with the given privVal and checks the vote. +// It returns an error if the vote is invalid and a boolean indicating if the +// error is recoverable or not. 
func SignAndCheckVote( vote *Vote, privVal PrivValidator, @@ -404,33 +416,38 @@ func SignAndCheckVote( ) (bool, error) { v := vote.ToProto() if err := privVal.SignVote(chainID, v); err != nil { - // Failing to sign a vote has always been a recoverable error, this function keeps it that way - return true, err // true = recoverable + // Failing to sign a vote has always been a recoverable error, this + // function keeps it that way. + return true, err } vote.Signature = v.Signature isPrecommit := vote.Type == cmtproto.PrecommitType if !isPrecommit && extensionsEnabled { // Non-recoverable because the caller passed parameters that don't make sense - return false, fmt.Errorf("only Precommit votes may have extensions enabled; vote type: %d", vote.Type) + return false, &ErrVoteExtensionInvalid{ExtSignature: v.ExtensionSignature} } isNil := vote.BlockID.IsZero() extSignature := (len(v.ExtensionSignature) > 0) - if extSignature == (!isPrecommit || isNil) { + + // Error if prevote contains an extension signature + if extSignature && (!isPrecommit || isNil) { // Non-recoverable because the vote is malformed - return false, fmt.Errorf( - "extensions must be present IFF vote is a non-nil Precommit; present %t, vote type %d, is nil %t", - extSignature, - vote.Type, - isNil, - ) + return false, &ErrVoteExtensionInvalid{ExtSignature: v.ExtensionSignature} } vote.ExtensionSignature = nil if extensionsEnabled { + // Error if missing extension signature for non-nil Precommit + if !extSignature && isPrecommit && !isNil { + // Non-recoverable because the vote is malformed + return false, &ErrVoteExtensionInvalid{ExtSignature: v.ExtensionSignature} + } + vote.ExtensionSignature = v.ExtensionSignature } + vote.Timestamp = v.Timestamp return true, nil diff --git a/types/vote_set.go b/types/vote_set.go index effcb16d2d3..6b9c70efed5 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -265,9 +265,8 @@ func (voteSet *VoteSet) addVerifiedVote( if existing := voteSet.votes[valIndex]; existing != nil { if existing.BlockID.Equals(vote.BlockID) { panic("addVerifiedVote does not expect duplicate votes") - } else { - conflicting = existing } + conflicting = existing // Replace vote if blockKey matches voteSet.maj23. 
if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey { voteSet.votes[valIndex] = vote diff --git a/types/vote_test.go b/types/vote_test.go index 9c7e8777f71..f49368ca8d2 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "testing" "time" @@ -22,12 +23,13 @@ func examplePrevote() *Vote { func examplePrecommit() *Vote { vote := exampleVote(byte(cmtproto.PrecommitType)) + vote.Extension = []byte("extension") vote.ExtensionSignature = []byte("signature") return vote } func exampleVote(t byte) *Vote { - var stamp, err = time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") + stamp, err := time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") if err != nil { panic(err) } @@ -61,7 +63,6 @@ func TestVoteSignable(t *testing.T) { } func TestVoteSignBytesTestVectors(t *testing.T) { - tests := []struct { chainID string vote *Vote @@ -85,7 +86,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x2a, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, // with proper (fixed size) height and round (PreVote): 2: { @@ -100,7 +102,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x2a, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, 3: { "", &Vote{Height: 1, Round: 1}, @@ -112,7 +115,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields (timestamp): 0x2a, - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, // containing non-empty chain_id: 4: { @@ -128,7 +132,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp // (field_number << 3) | wire_type 0x32, - 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID + 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + }, // chainID }, // containing vote extension 5: { @@ -309,7 +314,7 @@ func TestVoteVerify(t *testing.T) { func TestVoteString(t *testing.T) { str := examplePrecommit().String() - expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests + expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 657874656E73 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests if str != expected { t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str)
 }
 }
@@ -486,3 +491,110 @@ func TestVoteProtobuf(t *testing.T) {
 }
 }
 }
+
+func TestSignAndCheckVote(t *testing.T) {
+ privVal := NewMockPV()
+
+ testCases := []struct {
+ name string
+ extensionsEnabled bool
+ vote *Vote
+ expectError bool
+ }{
+ {
+ name: "precommit with extension signature",
+ extensionsEnabled: true,
+ vote: examplePrecommit(),
+ expectError: false,
+ },
+ {
+ name: "precommit with extension signature",
+ extensionsEnabled: false,
+ vote: examplePrecommit(),
+ expectError: false,
+ },
+ {
+ name: "precommit with extension signature for a nil block",
+ extensionsEnabled: true,
+ vote: func() *Vote {
+ v := examplePrecommit()
+ v.BlockID = BlockID{make([]byte, 0), PartSetHeader{0, make([]byte, 0)}}
+ return v
+ }(),
+ expectError: true,
+ },
+ {
+ name: "precommit with extension signature for a nil block",
+ extensionsEnabled: false,
+ vote: func() *Vote {
+ v := examplePrecommit()
+ v.BlockID = BlockID{make([]byte, 0), PartSetHeader{0, make([]byte, 0)}}
+ return v
+ }(),
+ expectError: true,
+ },
+ {
+ name: "precommit without extension",
+ extensionsEnabled: true,
+ vote: func() *Vote {
+ v := examplePrecommit()
+ v.Extension = make([]byte, 0)
+ return v
+ }(),
+ expectError: false,
+ },
+ {
+ name: "precommit without extension",
+ extensionsEnabled: false,
+ vote: func() *Vote {
+ v := examplePrecommit()
+ v.Extension = make([]byte, 0)
+ return v
+ }(),
+ expectError: false,
+ },
+ {
+ name: "prevote",
+ extensionsEnabled: true,
+ vote: examplePrevote(),
+ expectError: true,
+ },
+ {
+ name: "prevote",
+ extensionsEnabled: false,
+ vote: examplePrevote(),
+ expectError: false,
+ },
+ {
+ name: "prevote with extension",
+ extensionsEnabled: true,
+ vote: func() *Vote {
+ v := examplePrevote()
+ v.Extension = []byte("extension")
+ return v
+ }(),
+ expectError: true,
+ },
+ {
+ name: "prevote with extension",
+ extensionsEnabled: false,
+ vote: func() *Vote {
+ v := examplePrevote()
+ v.Extension = []byte("extension")
+ return v
+ }(),
+ expectError: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s (extensionsEnabled: %t)", tc.name, tc.extensionsEnabled), func(t *testing.T) {
+ _, err := SignAndCheckVote(tc.vote, privVal, "test_chain_id", tc.extensionsEnabled)
+ if tc.expectError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/version/version.go b/version/version.go
index 049f13dfc34..c00b4f8ffe2 100644
--- a/version/version.go
+++ b/version/version.go
@@ -3,9 +3,9 @@ package version
 const (
 // TMCoreSemVer is used as the fallback version of CometBFT
 // when not using git describe. It is formatted with semantic versioning.
- TMCoreSemVer = "0.38.0-dev"
+ TMCoreSemVer = "0.38.12"
 // ABCISemVer is the semantic version of the ABCI protocol
- ABCISemVer = "1.0.0"
+ ABCISemVer = "2.0.0"
 ABCIVersion = ABCISemVer
 // P2PProtocol versions all p2p behavior and msgs.
 // This includes proposer selection.
@@ -16,8 +16,6 @@ const (
 BlockProtocol uint64 = 11
 )
-var (
- // TMGitCommitHash uses git rev-parse HEAD to find commit hash which is helpful
- // for the engineering team when working with the cometbft binary. See Makefile
- TMGitCommitHash = ""
-)
+// TMGitCommitHash uses git rev-parse HEAD to find commit hash which is helpful
+// for the engineering team when working with the cometbft binary. See Makefile
+var TMGitCommitHash = ""
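A closing note on the SignAndCheckVote contract exercised by the test table above: the boolean return classifies failures as recoverable (the signer failed; retrying is safe) or non-recoverable (the vote itself is malformed). A minimal caller sketch, assuming vote, privVal, chainID, and extensionsEnabled are already in hand:

recoverable, err := types.SignAndCheckVote(vote, privVal, chainID, extensionsEnabled)
switch {
case err == nil:
	// vote.Signature (and vote.ExtensionSignature, for a non-nil
	// precommit with extensions enabled) are now populated.
case recoverable:
	// Transient signing failure; safe to retry with the same vote.
default:
	// Malformed vote, e.g. a prevote carrying an extension signature
	// (ErrVoteExtensionInvalid); drop it rather than retry.
}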