Skip to content

Commit

Permalink
Enable BEEFY report_fork_voting() (#6856)
Browse files Browse the repository at this point in the history
Related to #4523

Follow-up for: #5188

Reopening #6732 as a new
PR

---------

Co-authored-by: command-bot <>
  • Loading branch information
serban300 authored Jan 22, 2025
1 parent 2345eb9 commit 1bdb817
Show file tree
Hide file tree
Showing 17 changed files with 278 additions and 71 deletions.
14 changes: 12 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

37 changes: 24 additions & 13 deletions polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,9 @@
//! Autogenerated weights for `pallet_beefy_mmr`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
// Executed Command:
Expand Down Expand Up @@ -48,25 +48,36 @@ use core::marker::PhantomData;
/// Weight functions for `pallet_beefy_mmr`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
/// The range of component `n` is `[2, 512]`.
fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `0`
	//  Estimated: `0`
	// Minimum execution time: 622_000 picoseconds.
	// Pure in-memory check: a fixed base cost plus a small per-leaf component.
	let base = Weight::from_parts(1_166_954, 0).saturating_add(Weight::from_parts(0, 0));
	// Standard Error: 65
	let per_leaf = Weight::from_parts(1_356, 0).saturating_mul(n.into());
	base.saturating_add(per_leaf)
}
/// Storage: `System::BlockHash` (r:1 w:0)
/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
fn extract_validation_context() -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `68`
	//  Estimated: `3509`
	// Minimum execution time: 6_272_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(6_452_000, 0)
		.saturating_add(Weight::from_parts(0, 3509))
		.saturating_add(T::DbWeight::get().reads(1))
}
/// Storage: `Mmr::Nodes` (r:1 w:0)
/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
fn read_peak() -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `254`
	//  Estimated: `3505`
	// Minimum execution time: 6_576_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(6_760_000, 0)
		.saturating_add(Weight::from_parts(0, 3505))
		.saturating_add(T::DbWeight::get().reads(1))
}
Expand All @@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
/// The range of component `n` is `[2, 512]`.
fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `246`
	//  Estimated: `1517`
	// Minimum execution time: 12_538_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(24_516_023, 0)
		.saturating_add(Weight::from_parts(0, 1517))
		// Standard Error: 1_923
		.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
		.saturating_add(T::DbWeight::get().reads(2))
}
}
37 changes: 24 additions & 13 deletions polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,9 @@
//! Autogenerated weights for `pallet_beefy_mmr`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
// Executed Command:
Expand Down Expand Up @@ -48,25 +48,36 @@ use core::marker::PhantomData;
/// Weight functions for `pallet_beefy_mmr`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
/// The range of component `n` is `[2, 512]`.
fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `0`
	//  Estimated: `0`
	// Minimum execution time: 628_000 picoseconds.
	// Pure in-memory check: a fixed base cost plus a small per-leaf component.
	let base = Weight::from_parts(1_200_102, 0).saturating_add(Weight::from_parts(0, 0));
	// Standard Error: 63
	let per_leaf = Weight::from_parts(1_110, 0).saturating_mul(n.into());
	base.saturating_add(per_leaf)
}
/// Storage: `System::BlockHash` (r:1 w:0)
/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
fn extract_validation_context() -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `68`
	//  Estimated: `3509`
	// Minimum execution time: 9_862_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(10_329_000, 0)
		.saturating_add(Weight::from_parts(0, 3509))
		.saturating_add(T::DbWeight::get().reads(1))
}
/// Storage: `Mmr::Nodes` (r:1 w:0)
/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
fn read_peak() -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `221`
	//  Estimated: `3505`
	// Minimum execution time: 6_396_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(6_691_000, 0)
		.saturating_add(Weight::from_parts(0, 3505))
		.saturating_add(T::DbWeight::get().reads(1))
}
Expand All @@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
/// The range of component `n` is `[2, 512]`.
fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `213`
	//  Estimated: `1517`
	// Minimum execution time: 12_553_000 picoseconds.
	// NOTE(review): the scraped diff interleaved the removed and the added
	// lines; only the updated (2024-12-02 benchmark run) values are kept here.
	Weight::from_parts(24_003_920, 0)
		.saturating_add(Weight::from_parts(0, 1517))
		// Standard Error: 2_023
		.saturating_add(Weight::from_parts(1_390_986, 0).saturating_mul(n.into()))
		.saturating_add(T::DbWeight::get().reads(2))
}
}
28 changes: 28 additions & 0 deletions prdoc/pr_6856.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

title: Enable report_fork_voting()

doc:
- audience:
- Runtime Dev
- Runtime User
description: |
This PR enables calling `report_fork_voting`.
In order to do this we needed to also check that the ancestry proof is optimal.

crates:
- name: pallet-mmr
bump: minor
- name: sp-mmr-primitives
bump: minor
- name: sp-consensus-beefy
bump: minor
- name: rococo-runtime
bump: minor
- name: pallet-beefy
bump: minor
- name: pallet-beefy-mmr
bump: minor
- name: westend-runtime
bump: minor
18 changes: 18 additions & 0 deletions substrate/frame/beefy-mmr/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,24 @@ fn init_block<T: Config>(block_num: u32) {
mod benchmarks {
use super::*;

/// Generate ancestry proofs with `n` leafs and benchmark the logic that checks
/// if the proof is optimal.
///
/// Builds an `n`-block chain so the MMR contains exactly `n` leaves, generates a
/// mock ancestry proof over it, and measures only the `is_proof_optimal()` call.
#[benchmark]
fn n_leafs_proof_is_optimal(n: Linear<2, 512>) {
	// Use the pallet's local (off-chain style) storage so MMR nodes are readable here.
	pallet_mmr::UseLocalStorage::<T>::set(true);

	// Grow the chain to `n` blocks; each block appends one leaf to the MMR.
	for block_num in 1..=n {
		init_block::<T>(block_num);
	}
	let proof = Mmr::<T>::generate_mock_ancestry_proof().unwrap();
	// Sanity check: the proof covers exactly the `n` leaves we produced.
	assert_eq!(proof.leaf_count, n as u64);

	// Only the optimality check itself is measured.
	#[block]
	{
		<BeefyMmr<T> as AncestryHelper<HeaderFor<T>>>::is_proof_optimal(&proof);
	};
}

#[benchmark]
fn extract_validation_context() {
pallet_mmr::UseLocalStorage::<T>::set(true);
Expand Down
16 changes: 16 additions & 0 deletions substrate/frame/beefy-mmr/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,18 @@ where
.ok()
}

/// Checks whether the ancestry proof has the optimal (minimal) size for the
/// number of leaves it covers.
fn is_proof_optimal(proof: &Self::Proof) -> bool {
	// The real check runs unconditionally so that the benchmark (which invokes
	// this method inside its measured block) captures its actual cost.
	let is_proof_optimal = pallet_mmr::Pallet::<T>::is_ancestry_proof_optimal(proof);

	// We don't check the proof size when running benchmarks, since we use mock proofs
	// which would cause the test to fail.
	if cfg!(feature = "runtime-benchmarks") {
		return true
	}

	is_proof_optimal
}

fn extract_validation_context(header: HeaderFor<T>) -> Option<Self::ValidationContext> {
// Check if the provided header is canonical.
let expected_hash = frame_system::Pallet::<T>::block_hash(header.number());
Expand Down Expand Up @@ -292,6 +304,10 @@ impl<T: Config> AncestryHelperWeightInfo<HeaderFor<T>> for Pallet<T>
where
T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
{
/// Weight of the proof-optimality check; scales with the number of leaves
/// covered by the ancestry proof (benchmarked as `n_leafs_proof_is_optimal`).
fn is_proof_optimal(proof: &<Self as AncestryHelper<HeaderFor<T>>>::Proof) -> Weight {
	<T as Config>::WeightInfo::n_leafs_proof_is_optimal(proof.leaf_count.saturated_into())
}

/// Weight of extracting the validation context; delegates to the
/// runtime-configured, benchmark-generated weight value.
fn extract_validation_context() -> Weight {
	<T as Config>::WeightInfo::extract_validation_context()
}
Expand Down
Loading

0 comments on commit 1bdb817

Please sign in to comment.