Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Take into account proof size for transaction payment and priority #13958

Merged
merged 22 commits into from
Jun 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 59 additions & 21 deletions bin/node/runtime/src/impls.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,6 @@

//! Some configurable implementations as associated type for the substrate runtime.

use crate::{
AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime,
RuntimeCall,
};
use frame_support::{
pallet_prelude::*,
traits::{
Expand All @@ -32,6 +28,11 @@ use pallet_alliance::{IdentityVerifier, ProposalIndex, ProposalProvider};
use pallet_asset_tx_payment::HandleCredit;
use sp_std::prelude::*;

use crate::{
AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime,
RuntimeCall,
};

pub struct Author;
impl OnUnbalanced<NegativeImbalance> for Author {
fn on_nonzero_unbalanced(amount: NegativeImbalance) {
Expand Down Expand Up @@ -111,6 +112,10 @@ impl ProposalProvider<AccountId, Hash, RuntimeCall> for AllianceProposalProvider

#[cfg(test)]
mod multiplier_tests {
use frame_support::{
dispatch::DispatchClass,
weights::{Weight, WeightToFee},
};
use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment};
use sp_runtime::{
assert_eq_error_rate,
Expand All @@ -123,10 +128,6 @@ mod multiplier_tests {
AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime,
RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment,
};
use frame_support::{
dispatch::DispatchClass,
weights::{Weight, WeightToFee},
};

fn max_normal() -> Weight {
BlockWeights::get()
Expand Down Expand Up @@ -161,14 +162,28 @@ mod multiplier_tests {
// bump if it is zero.
let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy);

let max_normal = max_normal();
let target_weight = target();
let normalized_weight_dimensions = (
block_weight.ref_time() as f64 / max_normal.ref_time() as f64,
block_weight.proof_size() as f64 / max_normal.proof_size() as f64,
);

let (normal, max, target) =
if normalized_weight_dimensions.0 < normalized_weight_dimensions.1 {
(block_weight.proof_size(), max_normal.proof_size(), target_weight.proof_size())
} else {
(block_weight.ref_time(), max_normal.ref_time(), target_weight.ref_time())
};

// maximum tx weight
let m = max_normal().ref_time() as f64;
let m = max as f64;
// block weight always truncated to max weight
let block_weight = (block_weight.ref_time() as f64).min(m);
let block_weight = (normal as f64).min(m);
let v: f64 = AdjustmentVariable::get().to_float();

// Ideal saturation in terms of weight
let ss = target().ref_time() as f64;
let ss = target as f64;
// Current saturation in terms of weight
let s = block_weight;

Expand Down Expand Up @@ -218,10 +233,16 @@ mod multiplier_tests {
#[test]
fn multiplier_can_grow_from_zero() {
// Regression test: the fee multiplier must be able to recover from its minimum value.
// If the configured minimum is too small, `runtime_multiplier_update` could return the
// same value forever; here we require a strict increase once a block is fuller than target.
// if the min is too small, then this will not change, and we are doomed forever.
// the weight is 1/100th bigger than target.
// the block ref time is 1/100th bigger than target.
// NOTE(review): the two near-duplicate comment lines above and the duplicated assert
// below appear to be old/new diff residue of the same change — confirm against the
// merged file before treating this listing as source.
run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || {
let next = runtime_multiplier_update(min_multiplier());
assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier());
assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier());
});

// the block proof size is 1/100th bigger than target.
// Same property for the proof-size dimension of the weight: a block whose proof size
// exceeds target by 1% must also push the multiplier up from its minimum.
run_with_system_weight(target().set_proof_size((target().proof_size() / 100) * 101), || {
let next = runtime_multiplier_update(min_multiplier());
assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier());
})
}

Expand Down Expand Up @@ -407,23 +428,33 @@ mod multiplier_tests {

#[test]
fn weight_to_fee_should_not_overflow_on_large_weights() {
let kb = Weight::from_parts(1024, 0);
let mb = 1024u64 * kb;
let kb_time = Weight::from_parts(1024, 0);
let kb_size = Weight::from_parts(0, 1024);
let mb_time = 1024u64 * kb_time;
let max_fm = Multiplier::saturating_from_integer(i128::MAX);

// check that for all values it can compute, correctly.
vec![
Weight::zero(),
// testcases ignoring proof size part of the weight.
Weight::from_parts(1, 0),
Weight::from_parts(10, 0),
Weight::from_parts(1000, 0),
kb,
10u64 * kb,
100u64 * kb,
mb,
10u64 * mb,
kb_time,
10u64 * kb_time,
100u64 * kb_time,
mb_time,
10u64 * mb_time,
Weight::from_parts(2147483647, 0),
Weight::from_parts(4294967295, 0),
// testcases ignoring ref time part of the weight.
Weight::from_parts(0, 100000000000),
1000000u64 * kb_size,
1000000000u64 * kb_size,
Weight::from_parts(0, 18014398509481983),
Weight::from_parts(0, 9223372036854775807),
// test cases with both parts of the weight.
BlockWeights::get().max_block / 1024,
BlockWeights::get().max_block / 2,
BlockWeights::get().max_block,
Weight::MAX / 2,
Expand All @@ -440,7 +471,14 @@ mod multiplier_tests {

// Some values that are all above the target and will cause an increase.
let t = target();
vec![t + Weight::from_parts(100, 0), t * 2, t * 4].into_iter().for_each(|i| {
vec![
t + Weight::from_parts(100, 0),
t + Weight::from_parts(0, t.proof_size() * 2),
t * 2,
t * 4,
]
.into_iter()
.for_each(|i| {
run_with_system_weight(i, || {
let fm = runtime_multiplier_update(max_fm);
// won't grow. The convert saturates everything.
Expand Down
93 changes: 58 additions & 35 deletions frame/transaction-payment/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,15 @@
use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;

use frame_support::{
dispatch::{
DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo,
},
traits::{Defensive, EstimateCallFee, Get},
weights::{Weight, WeightToFee},
};
pub use pallet::*;
pub use payment::*;
use sp_runtime::{
traits::{
Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion,
Expand All @@ -58,17 +67,10 @@ use sp_runtime::{
transaction_validity::{
TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction,
},
FixedPointNumber, FixedPointOperand, FixedU128, Perquintill, RuntimeDebug,
FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Perquintill, RuntimeDebug,
};
use sp_std::prelude::*;

use frame_support::{
dispatch::{
DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo,
},
traits::{EstimateCallFee, Get},
weights::{Weight, WeightToFee},
};
pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo};

#[cfg(test)]
mod mock;
Expand All @@ -78,10 +80,6 @@ mod tests;
mod payment;
mod types;

pub use pallet::*;
pub use payment::*;
pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo};

/// Fee multiplier.
pub type Multiplier = FixedU128;

Expand All @@ -108,10 +106,17 @@ type BalanceOf<T> = <<T as Config>::OnChargeTransaction as OnChargeTransaction<T
/// with tests that the combination of this `M` and `V` is not such that the multiplier can drop to
/// zero and never recover.
///
/// note that `s'` is interpreted as a portion in the _normal transaction_ capacity of the block.
/// Note that `s'` is interpreted as a portion in the _normal transaction_ capacity of the block.
/// For example, given `s' == 0.25` and `AvailableBlockRatio = 0.75`, then the target fullness is
/// _0.25 of the normal capacity_ and _0.1875 of the entire block_.
///
/// Since block weight is multi-dimensional, we use the scarcer resource, referred to as the
/// limiting dimension, for the calculation of fees. We determine the limiting dimension by comparing the
/// dimensions using the ratio of `dimension_value / max_dimension_value` and selecting the largest
/// ratio. For instance, if a block is 30% full based on `ref_time` and 25% full based on
/// `proof_size`, we identify `ref_time` as the limiting dimension, indicating that the block is 30%
/// full.
///
/// This implementation implies the bound:
/// - `v ≤ p / k * (s − s')`
/// - or, solving for `p`: `p >= v * k * (s - s')`
Expand Down Expand Up @@ -207,28 +212,44 @@ where
let normal_block_weight =
current_block_weight.get(DispatchClass::Normal).min(normal_max_weight);

// TODO: Handle all weight dimensions
let normal_max_weight = normal_max_weight.ref_time();
let normal_block_weight = normal_block_weight.ref_time();

let s = S::get();
let v = V::get();

let target_weight = (s * normal_max_weight) as u128;
let block_weight = normal_block_weight as u128;
// Normalize dimensions so they can be compared. Ensure (defensive) max weight is non-zero.
let normalized_ref_time = Perbill::from_rational(
Ank4n marked this conversation as resolved.
Show resolved Hide resolved
normal_block_weight.ref_time(),
normal_max_weight.ref_time().max(1),
);
let normalized_proof_size = Perbill::from_rational(
normal_block_weight.proof_size(),
normal_max_weight.proof_size().max(1),
);

// Pick the limiting dimension. If the proof size is the limiting dimension, then the
// multiplier is adjusted by the proof size. Otherwise, it is adjusted by the ref time.
let (normal_limiting_dimension, max_limiting_dimension) =
if normalized_ref_time < normalized_proof_size {
(normal_block_weight.proof_size(), normal_max_weight.proof_size())
} else {
(normal_block_weight.ref_time(), normal_max_weight.ref_time())
};

let target_block_fullness = S::get();
let adjustment_variable = V::get();

let target_weight = (target_block_fullness * max_limiting_dimension) as u128;
let block_weight = normal_limiting_dimension as u128;

// determines if the first_term is positive
let positive = block_weight >= target_weight;
let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight);

// defensive only, a test case assures that the maximum weight diff can fit in Multiplier
// without any saturation.
let diff = Multiplier::saturating_from_rational(diff_abs, normal_max_weight.max(1));
let diff = Multiplier::saturating_from_rational(diff_abs, max_limiting_dimension.max(1));
let diff_squared = diff.saturating_mul(diff);

let v_squared_2 = v.saturating_mul(v) / Multiplier::saturating_from_integer(2);
let v_squared_2 = adjustment_variable.saturating_mul(adjustment_variable) /
Multiplier::saturating_from_integer(2);

let first_term = v.saturating_mul(diff);
let first_term = adjustment_variable.saturating_mul(diff);
let second_term = v_squared_2.saturating_mul(diff_squared);

if positive {
Expand Down Expand Up @@ -290,10 +311,11 @@ const MULTIPLIER_DEFAULT_VALUE: Multiplier = Multiplier::from_u32(1);

#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;

use super::*;

#[pallet::pallet]
pub struct Pallet<T>(_);

Expand Down Expand Up @@ -710,19 +732,20 @@ where
tip: BalanceOf<T>,
final_fee: BalanceOf<T>,
) -> TransactionPriority {
// Calculate how many such extrinsics we could fit into an empty block and take
// the limitting factor.
// Calculate how many such extrinsics we could fit into an empty block and take the
// limiting factor.
let max_block_weight = T::BlockWeights::get().max_block;
let max_block_length = *T::BlockLength::get().max.get(info.class) as u64;

// TODO: Take into account all dimensions of weight
let max_block_weight = max_block_weight.ref_time();
let info_weight = info.weight.ref_time();

let bounded_weight = info_weight.clamp(1, max_block_weight);
// bounded_weight is used as a divisor later so we keep it non-zero.
let bounded_weight = info.weight.max(Weight::from_parts(1, 1)).min(max_block_weight);
let bounded_length = (len as u64).clamp(1, max_block_length);

let max_tx_per_block_weight = max_block_weight / bounded_weight;
// returns the scarce resource, i.e. the one that is limiting the number of transactions.
let max_tx_per_block_weight = max_block_weight
.checked_div_per_component(&bounded_weight)
.defensive_proof("bounded_weight is non-zero; qed")
.unwrap_or(1);
let max_tx_per_block_length = max_block_length / bounded_length;
// Given our current knowledge this value is going to be in a reasonable range - i.e.
// less than 10^9 (2^30), so multiplying by the `tip` value is unlikely to overflow the
Expand Down