-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Create benchmarks for XCM instructions introduced in v3 #4564
Changes from all commits
2f41600
7b56ff5
ab1c0c7
a099680
74ea1a8
5d1bab7
3c729b4
ec880ef
22de225
516b02d
3b7c47a
bc16824
70dc3f5
421dd8a
e731fdf
d32f33e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -21,9 +21,10 @@ use frame_benchmarking::{benchmarks, BenchmarkError}; | |
use frame_support::dispatch::GetDispatchInfo; | ||
use sp_std::vec; | ||
use xcm::{ | ||
latest::{prelude::*, MultiAssets}, | ||
latest::{prelude::*, MaybeErrorCode, MultiAssets}, | ||
DoubleEncoded, | ||
}; | ||
use xcm_executor::ExecutorError; | ||
|
||
benchmarks! { | ||
report_holding { | ||
|
@@ -254,12 +255,10 @@ benchmarks! { | |
} : { | ||
_result = executor.execute(xcm); | ||
} verify { | ||
match _result { | ||
Err(error) if error.xcm_error == XcmError::Trap(10) => { | ||
// This is the success condition | ||
}, | ||
_ => Err("xcm trap did not return the expected error")? | ||
}; | ||
assert!(matches!(_result, Err(ExecutorError { | ||
xcm_error: XcmError::Trap(10), | ||
.. | ||
}))); | ||
} | ||
|
||
subscribe_version { | ||
|
@@ -306,13 +305,141 @@ benchmarks! { | |
executor.holding = holding.into(); | ||
let instruction = Instruction::InitiateReserveWithdraw { assets: assets_filter, reserve, xcm: Xcm(vec![]) }; | ||
let xcm = Xcm(vec![instruction]); | ||
}:{ | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
// The execute completing successfully is as good as we can check. | ||
// TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 | ||
} | ||
|
||
burn_asset { | ||
let holding = T::worst_case_holding(0); | ||
let assets = holding.clone(); | ||
|
||
let mut executor = new_executor::<T>(Default::default()); | ||
executor.holding = holding.into(); | ||
|
||
let instruction = Instruction::BurnAsset(assets.into()); | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
assert!(executor.holding.is_empty()); | ||
} | ||
|
||
expect_asset { | ||
let holding = T::worst_case_holding(0); | ||
let assets = holding.clone(); | ||
|
||
let mut executor = new_executor::<T>(Default::default()); | ||
executor.holding = holding.into(); | ||
|
||
let instruction = Instruction::ExpectAsset(assets.into()); | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
// `execute` completing successfully is as good as we can check. | ||
} | ||
|
||
expect_origin { | ||
let expected_origin = Parent.into(); | ||
let mut executor = new_executor::<T>(Default::default()); | ||
|
||
let instruction = Instruction::ExpectOrigin(Some(expected_origin)); | ||
let xcm = Xcm(vec![instruction]); | ||
let mut _result = Ok(()); | ||
}: { | ||
_result = executor.execute(xcm); | ||
} verify { | ||
assert!(matches!(_result, Err(ExecutorError { | ||
xcm_error: XcmError::ExpectationFalse, | ||
.. | ||
}))); | ||
} | ||
|
||
expect_error { | ||
KiChjang marked this conversation as resolved.
Show resolved
Hide resolved
|
||
let mut executor = new_executor::<T>(Default::default()); | ||
executor.error = Some((3u32, XcmError::Overflow)); | ||
|
||
let instruction = Instruction::ExpectError(None); | ||
let xcm = Xcm(vec![instruction]); | ||
let mut _result = Ok(()); | ||
}: { | ||
_result = executor.execute(xcm); | ||
} verify { | ||
assert!(matches!(_result, Err(ExecutorError { | ||
xcm_error: XcmError::ExpectationFalse, | ||
.. | ||
}))); | ||
} | ||
|
||
query_pallet { | ||
let query_id = Default::default(); | ||
let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; | ||
let max_weight = Default::default(); | ||
let mut executor = new_executor::<T>(Default::default()); | ||
|
||
let instruction = Instruction::QueryPallet { | ||
module_name: b"frame_system".to_vec(), | ||
kianenigma marked this conversation as resolved.
Show resolved
Hide resolved
|
||
response_info: QueryResponseInfo { destination, query_id, max_weight }, | ||
}; | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
// TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 | ||
} | ||
|
||
expect_pallet { | ||
let mut executor = new_executor::<T>(Default::default()); | ||
|
||
let instruction = Instruction::ExpectPallet { | ||
index: 0, | ||
name: b"System".to_vec(), | ||
module_name: b"frame_system".to_vec(), | ||
crate_major: 4, | ||
min_crate_minor: 0, | ||
Comment on lines
+398
to
+402
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is there a way to make this test more adaptable? For example, you will easily fail this benchmark when the crate version increments. Better to query some value you know will succeed here. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Well hmm, I guess I could create a dummy pallet, import it into the runtime and give it a fixed index location. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It would also need to be generic for any runtime we benchmark this on, so making a custom pallet is not a good choice. |
||
}; | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
// the execution succeeding is all we need to verify this xcm was successful | ||
} | ||
|
||
report_transact_status { | ||
let query_id = Default::default(); | ||
let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; | ||
let max_weight = Default::default(); | ||
|
||
let mut executor = new_executor::<T>(Default::default()); | ||
executor.transact_status = MaybeErrorCode::Error(b"MyError".to_vec()); | ||
|
||
let instruction = Instruction::ReportTransactStatus(QueryResponseInfo { | ||
query_id, | ||
destination, | ||
max_weight, | ||
}); | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
// TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 | ||
} | ||
|
||
clear_transact_status { | ||
let mut executor = new_executor::<T>(Default::default()); | ||
executor.transact_status = MaybeErrorCode::Error(b"MyError".to_vec()); | ||
|
||
let instruction = Instruction::ClearTransactStatus; | ||
let xcm = Xcm(vec![instruction]); | ||
}: { | ||
executor.execute(xcm)?; | ||
} verify { | ||
assert_eq!(executor.transact_status, MaybeErrorCode::Success); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not a note for this PR, but it seems strange to me that the transaction status defaults to success rather than some kind of "unreported" state. |
||
} | ||
|
||
impl_benchmark_test_suite!( | ||
Pallet, | ||
crate::generic::mock::new_test_ext(), | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It would help to add a simple comment here noting that the worst-case scenario is when the expected-origin check fails.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't think it actually is -- I'm using the failure case here because it's easier to verify that it worked than the success case, since the success case looks practically identical to any other success case.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Doesn't success here mean that the expected origin matches? So literally only one input could give a success output?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, but I can't do a match against the result. If we are to treat the entire XCM executor as a blackbox, then the success case is not distinguishable from the "do nothing" case, since both of them emit no observable result, and hence I've chosen the failure case.