diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b6bc087b1ba..406b3f294af 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -272,7 +272,7 @@ tokio-console http://127.0.0.1:5555
 
 To optimize performance it's useful to profile iroha.
 
-To do that you should compile iroha with `profiling` profile and with `profiling` feature:
+To do that, compile iroha with the `profiling` profile and the `profiling` feature:
 
 ```bash
 RUSTFLAGS="-C force-frame-pointers=on" cargo +nightly -Z build-std build --target your-desired-target --profile profiling --features profiling
diff --git a/cli/README.md b/cli/README.md
index 5ba8d269b39..60a69f453c9 100644
--- a/cli/README.md
+++ b/cli/README.md
@@ -22,7 +22,7 @@ The results of the compilation can be found in `/target/release
 
 ### Add features
 
-To add optional features, use ``--features``. For example, to add the support for _dev_telemetry_, run:
+To add optional features, use ``--features``. For example, to add support for _dev telemetry_, run:
 
 ```bash
 cargo build --release --features dev-telemetry
diff --git a/cli/src/lib.rs b/cli/src/lib.rs
index 1c3b107bcc6..9854535d50a 100644
--- a/cli/src/lib.rs
+++ b/cli/src/lib.rs
@@ -252,7 +252,7 @@ impl Iroha {
         });
         let state = Arc::new(state);
 
-        let queue = Arc::new(Queue::from_config(config.queue));
+        let queue = Arc::new(Queue::from_config(config.queue, events_sender.clone()));
 
         #[cfg(feature = "telemetry")]
         Self::start_telemetry(&logger, &config).await?;
diff --git a/client/benches/tps/utils.rs b/client/benches/tps/utils.rs
index d215d1ce203..6e2f74d83fc 100644
--- a/client/benches/tps/utils.rs
+++ b/client/benches/tps/utils.rs
@@ -18,6 +18,7 @@ use iroha_client::{
         prelude::*,
     },
 };
+use iroha_data_model::events::pipeline::{BlockEventFilter, BlockStatus};
 use serde::Deserialize;
 use test_network::*;
 
@@ -172,13 +173,11 @@ impl MeasurerUnit {
     fn spawn_event_counter(&self) -> thread::JoinHandle<Result<()>> {
         let listener = self.client.clone();
         let (init_sender, init_receiver) = mpsc::channel();
-        let event_filter = PipelineEventFilter::new()
-            .for_entity(PipelineEntityKind::Block)
-            .for_status(PipelineStatusKind::Committed);
+        let event_filter = BlockEventFilter::default().for_status(BlockStatus::Applied);
         let blocks_expected = self.config.blocks as usize;
         let name = self.name;
         let handle = thread::spawn(move || -> Result<()> {
-            let mut event_iterator = listener.listen_for_events(event_filter)?;
+            let mut event_iterator = listener.listen_for_events([event_filter])?;
             init_sender.send(())?;
             for i in 1..=blocks_expected {
                 let _event = event_iterator.next().expect("Event stream closed")?;
diff --git a/client/src/client.rs b/client/src/client.rs
index ce942c1752c..b30a0d67193 100644
--- a/client/src/client.rs
+++ b/client/src/client.rs
@@ -14,7 +14,13 @@ use eyre::{eyre, Result, WrapErr};
 use futures_util::StreamExt;
 use http_default::{AsyncWebSocketStream, WebSocketStream};
 pub use iroha_config::client_api::ConfigDTO;
-use iroha_data_model::query::QueryOutputBox;
+use iroha_data_model::{
+    events::pipeline::{
+        BlockEventFilter, BlockStatus, PipelineEventBox, PipelineEventFilterBox,
+        TransactionEventFilter, TransactionStatus,
+    },
+    query::QueryOutputBox,
+};
 use iroha_logger::prelude::*;
 use iroha_telemetry::metrics::Status;
 use iroha_torii_const::uri as torii_uri;
@@ -603,14 +609,19 @@ impl Client {
         rt.block_on(async {
             let mut event_iterator = {
-                let event_iterator_result = tokio::time::timeout_at(
-                    deadline,
-                    self.listen_for_events_async(PipelineEventFilter::new().for_hash(hash.into())),
-                )
-                .await
-                .map_err(Into::into)
-                .and_then(std::convert::identity)
-                .wrap_err("Failed to establish event listener connection");
+                let filters = vec![
+                    TransactionEventFilter::default().for_hash(hash).into(),
+                    PipelineEventFilterBox::from(
+                        BlockEventFilter::default().for_status(BlockStatus::Applied),
+                    ),
+                ];
+
+                let event_iterator_result =
+                    tokio::time::timeout_at(deadline, self.listen_for_events_async(filters))
+                        .await
+                        .map_err(Into::into)
+                        .and_then(std::convert::identity)
+                        .wrap_err("Failed to establish event listener connection");
                 let _send_result = init_sender.send(event_iterator_result.is_ok());
                 event_iterator_result?
             };
@@ -631,17 +642,34 @@ impl Client {
         event_iterator: &mut AsyncEventStream,
         hash: HashOf<SignedTransaction>,
     ) -> Result<HashOf<SignedTransaction>> {
+        let mut block_height = None;
+
         while let Some(event) = event_iterator.next().await {
-            if let Event::Pipeline(this_event) = event? {
-                match this_event.status() {
-                    PipelineStatus::Validating => {}
-                    PipelineStatus::Rejected(ref reason) => {
-                        return Err(reason.clone().into());
+            if let EventBox::Pipeline(this_event) = event? {
+                match this_event {
+                    PipelineEventBox::Transaction(transaction_event) => {
+                        match transaction_event.status() {
+                            TransactionStatus::Queued => {}
+                            TransactionStatus::Approved => {
+                                block_height = transaction_event.block_height;
+                            }
+                            TransactionStatus::Rejected(reason) => {
+                                return Err((Clone::clone(&**reason)).into());
+                            }
+                            TransactionStatus::Expired => return Err(eyre!("Transaction expired")),
+                        }
+                    }
+                    PipelineEventBox::Block(block_event) => {
+                        if Some(block_event.header().height()) == block_height {
+                            if let BlockStatus::Applied = block_event.status() {
+                                return Ok(hash);
+                            }
+                        }
                     }
-                    PipelineStatus::Committed => return Ok(hash),
                 }
             }
         }
+
         Err(eyre!(
             "Connection dropped without `Committed` or `Rejected` event"
         ))
@@ -903,11 +931,9 @@ impl Client {
     /// - Forwards from [`events_api::EventIterator::new`]
     pub fn listen_for_events(
         &self,
-        event_filter: impl Into<EventFilterBox>,
-    ) -> Result<impl Iterator<Item = Result<Event>>> {
-        let event_filter = event_filter.into();
-        iroha_logger::trace!(?event_filter);
-        events_api::EventIterator::new(self.events_handler(event_filter)?)
+        event_filters: impl IntoIterator<Item = impl Into<EventFilterBox>>,
+    ) -> Result<impl Iterator<Item = Result<EventBox>>> {
+        events_api::EventIterator::new(self.events_handler(event_filters)?)
     }
 
     /// Connect asynchronously (through `WebSocket`) to listen for `Iroha` `pipeline` and `data` events.
@@ -917,11 +943,9 @@ impl Client {
     /// - Forwards from [`events_api::AsyncEventStream::new`]
     pub async fn listen_for_events_async(
         &self,
-        event_filter: impl Into<EventFilterBox> + Send,
+        event_filters: impl IntoIterator<Item = impl Into<EventFilterBox>> + Send,
     ) -> Result<AsyncEventStream> {
-        let event_filter = event_filter.into();
-        iroha_logger::trace!(?event_filter, "Async listening with");
-        events_api::AsyncEventStream::new(self.events_handler(event_filter)?).await
+        events_api::AsyncEventStream::new(self.events_handler(event_filters)?).await
     }
 
     /// Constructs an Events API handler. With it, you can use any WS client you want.
@@ -931,10 +955,10 @@ impl Client {
     #[inline]
     pub fn events_handler(
         &self,
-        event_filter: impl Into<EventFilterBox>,
+        event_filters: impl IntoIterator<Item = impl Into<EventFilterBox>>,
     ) -> Result<events_api::flow::Init> {
         events_api::flow::Init::new(
-            event_filter.into(),
+            event_filters,
             self.headers.clone(),
             self.torii_url
                 .join(torii_uri::SUBSCRIPTION)
@@ -1237,12 +1261,12 @@ pub mod events_api {
         /// Initialization struct for Events API flow.
pub struct Init { - /// Event filter - filter: EventFilterBox, - /// HTTP request headers - headers: HashMap, /// TORII URL url: Url, + /// HTTP request headers + headers: HashMap, + /// Event filter + filters: Vec, } impl Init { @@ -1252,14 +1276,14 @@ pub mod events_api { /// Fails if [`transform_ws_url`] fails. #[inline] pub(in super::super) fn new( - filter: EventFilterBox, + filters: impl IntoIterator>, headers: HashMap, url: Url, ) -> Result { Ok(Self { - filter, - headers, url: transform_ws_url(url)?, + headers, + filters: filters.into_iter().map(Into::into).collect(), }) } } @@ -1269,12 +1293,12 @@ pub mod events_api { fn init(self) -> InitData { let Self { - filter, - headers, url, + headers, + filters, } = self; - let msg = EventSubscriptionRequest::new(filter).encode(); + let msg = EventSubscriptionRequest::new(filters).encode(); InitData::new(R::new(HttpMethod::GET, url).headers(headers), msg, Events) } } @@ -1284,7 +1308,7 @@ pub mod events_api { pub struct Events; impl FlowEvents for Events { - type Event = crate::data_model::prelude::Event; + type Event = crate::data_model::prelude::EventBox; fn message(&self, message: Vec) -> Result { let event_socket_message = EventMessage::decode_all(&mut message.as_slice())?; diff --git a/client/src/config.rs b/client/src/config.rs index 34b7e8663c7..72bb909d8c7 100644 --- a/client/src/config.rs +++ b/client/src/config.rs @@ -9,7 +9,7 @@ use iroha_config::{ base, base::{FromEnv, StdEnv, UnwrapPartial}, }; -use iroha_crypto::prelude::*; +use iroha_crypto::KeyPair; use iroha_data_model::{prelude::*, ChainId}; use iroha_primitives::small::SmallStr; use serde::{Deserialize, Serialize}; diff --git a/client/src/http.rs b/client/src/http.rs index 40ea3b923b0..905c4965838 100644 --- a/client/src/http.rs +++ b/client/src/http.rs @@ -150,7 +150,7 @@ pub mod ws { /// use eyre::Result; /// use url::Url; /// use iroha_client::{ - /// data_model::prelude::Event, + /// data_model::prelude::EventBox, /// client::events_api::flow as events_api_flow, /// http::{ /// ws::conn_flow::{Events, Init, InitData}, @@ -203,7 +203,7 @@ pub mod ws { /// } /// } /// - /// fn collect_5_events(flow: events_api_flow::Init) -> Result> { + /// fn collect_5_events(flow: events_api_flow::Init) -> Result> { /// // Constructing initial flow data /// let InitData { /// next: flow, @@ -216,7 +216,7 @@ pub mod ws { /// stream.send(first_message); /// /// // And now we are able to collect events - /// let mut events: Vec = Vec::with_capacity(5); + /// let mut events: Vec = Vec::with_capacity(5); /// while events.len() < 5 { /// let msg = stream.get_next(); /// let event = flow.message(msg)?; diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index fe95e30f348..34a102afd93 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -10,6 +10,7 @@ use iroha_config::parameters::actual::Root as Config; use iroha_data_model::{ asset::{AssetId, AssetValue, AssetValueType}, isi::error::{InstructionEvaluationError, InstructionExecutionError, Mismatch, TypeError}, + transaction::error::TransactionRejectionReason, }; use serde_json::json; use test_network::*; @@ -463,17 +464,17 @@ fn fail_if_dont_satisfy_spec() { .expect_err("Should be rejected due to non integer value"); let rejection_reason = err - .downcast_ref::() - .unwrap_or_else(|| panic!("Error {err} is not PipelineRejectionReason")); + .downcast_ref::() + .unwrap_or_else(|| panic!("Error {err} is not TransactionRejectionReason")); assert_eq!( rejection_reason, - 
&PipelineRejectionReason::Transaction(TransactionRejectionReason::Validation( - ValidationFail::InstructionFailed(InstructionExecutionError::Evaluate( - InstructionEvaluationError::Type(TypeError::from(Mismatch { + &TransactionRejectionReason::Validation(ValidationFail::InstructionFailed( + InstructionExecutionError::Evaluate(InstructionEvaluationError::Type( + TypeError::from(Mismatch { expected: AssetValueType::Numeric(NumericSpec::integer()), actual: AssetValueType::Numeric(NumericSpec::fractional(2)) - })) + }) )) )) ); diff --git a/client/tests/integration/domain_owner_permissions.rs b/client/tests/integration/domain_owner_permissions.rs index e0945b85f70..af78eff12ac 100644 --- a/client/tests/integration/domain_owner_permissions.rs +++ b/client/tests/integration/domain_owner_permissions.rs @@ -3,6 +3,7 @@ use iroha_client::{ crypto::KeyPair, data_model::{account::SignatureCheckCondition, prelude::*}, }; +use iroha_data_model::transaction::error::TransactionRejectionReason; use serde_json::json; use test_network::*; @@ -37,14 +38,12 @@ fn domain_owner_domain_permissions() -> Result<()> { .expect_err("Tx should fail due to permissions"); let rejection_reason = err - .downcast_ref::() - .unwrap_or_else(|| panic!("Error {err} is not PipelineRejectionReason")); + .downcast_ref::() + .unwrap_or_else(|| panic!("Error {err} is not TransactionRejectionReason")); assert!(matches!( rejection_reason, - &PipelineRejectionReason::Transaction(TransactionRejectionReason::Validation( - ValidationFail::NotPermitted(_) - )) + &TransactionRejectionReason::Validation(ValidationFail::NotPermitted(_)) )); // "alice@wonderland" owns the domain and can register AssetDefinitions by default as domain owner diff --git a/client/tests/integration/events/data.rs b/client/tests/integration/events/data.rs index 4250ff2b682..9a6d6986cc2 100644 --- a/client/tests/integration/events/data.rs +++ b/client/tests/integration/events/data.rs @@ -140,7 +140,7 @@ fn transaction_execution_should_produce_events( let (event_sender, event_receiver) = mpsc::channel(); let event_filter = DataEventFilter::Any; thread::spawn(move || -> Result<()> { - let event_iterator = listener.listen_for_events(event_filter)?; + let event_iterator = listener.listen_for_events([event_filter])?; init_sender.send(())?; for event in event_iterator { event_sender.send(event)? @@ -184,7 +184,7 @@ fn produce_multiple_events() -> Result<()> { let (event_sender, event_receiver) = mpsc::channel(); let event_filter = DataEventFilter::Any; thread::spawn(move || -> Result<()> { - let event_iterator = listener.listen_for_events(event_filter)?; + let event_iterator = listener.listen_for_events([event_filter])?; init_sender.send(())?; for event in event_iterator { event_sender.send(event)? 
diff --git a/client/tests/integration/events/notification.rs b/client/tests/integration/events/notification.rs index bf26feb351b..c060d1e1e64 100644 --- a/client/tests/integration/events/notification.rs +++ b/client/tests/integration/events/notification.rs @@ -33,11 +33,9 @@ fn trigger_completion_success_should_produce_event() -> Result<()> { let thread_client = test_client.clone(); let (sender, receiver) = mpsc::channel(); let _handle = thread::spawn(move || -> Result<()> { - let mut event_it = thread_client.listen_for_events( - TriggerCompletedEventFilter::new() - .for_trigger(trigger_id) - .for_outcome(TriggerCompletedOutcomeType::Success), - )?; + let mut event_it = thread_client.listen_for_events([TriggerCompletedEventFilter::new() + .for_trigger(trigger_id) + .for_outcome(TriggerCompletedOutcomeType::Success)])?; if event_it.next().is_some() { sender.send(())?; return Ok(()); @@ -79,11 +77,9 @@ fn trigger_completion_failure_should_produce_event() -> Result<()> { let thread_client = test_client.clone(); let (sender, receiver) = mpsc::channel(); let _handle = thread::spawn(move || -> Result<()> { - let mut event_it = thread_client.listen_for_events( - TriggerCompletedEventFilter::new() - .for_trigger(trigger_id) - .for_outcome(TriggerCompletedOutcomeType::Failure), - )?; + let mut event_it = thread_client.listen_for_events([TriggerCompletedEventFilter::new() + .for_trigger(trigger_id) + .for_outcome(TriggerCompletedOutcomeType::Failure)])?; if event_it.next().is_some() { sender.send(())?; return Ok(()); diff --git a/client/tests/integration/events/pipeline.rs b/client/tests/integration/events/pipeline.rs index 30f17528219..cd8288e0f05 100644 --- a/client/tests/integration/events/pipeline.rs +++ b/client/tests/integration/events/pipeline.rs @@ -9,6 +9,14 @@ use iroha_client::{ }, }; use iroha_config::parameters::actual::Root as Config; +use iroha_data_model::{ + events::pipeline::{ + BlockEvent, BlockEventFilter, BlockStatus, TransactionEventFilter, TransactionStatus, + }, + isi::error::InstructionExecutionError, + transaction::error::TransactionRejectionReason, + ValidationFail, +}; use test_network::*; // Needed to re-enable ignored tests. 
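> Note: the pipeline test hunks around this point exercise the reworked subscription API, where `listen_for_events` takes a collection of filters and pipeline events are split into block and transaction variants. A minimal sketch of the new calling convention, mirroring the filter types used in this PR (`first_pipeline_event` is a hypothetical helper, not part of the changeset, and client construction is elided):

```rust
use eyre::Result;
use iroha_client::client::Client;
use iroha_data_model::events::pipeline::{
    BlockEventFilter, BlockStatus, PipelineEventFilterBox, TransactionEventFilter,
};

// Hypothetical helper: block until the first event matching either filter arrives.
fn first_pipeline_event(client: &Client) -> Result<()> {
    // Several filters can now be passed in one subscription request.
    let filters = vec![
        PipelineEventFilterBox::from(TransactionEventFilter::default()),
        PipelineEventFilterBox::from(
            BlockEventFilter::default().for_status(BlockStatus::Applied),
        ),
    ];
    let mut events = client.listen_for_events(filters)?;
    // The iterator yields `Result<EventBox>` items.
    let event = events.next().expect("Event stream closed")?;
    println!("first pipeline event: {event:?}");
    Ok(())
}
```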
@@ -17,24 +25,28 @@ const PEER_COUNT: usize = 7; #[ignore = "ignore, more in #2851"] #[test] fn transaction_with_no_instructions_should_be_committed() -> Result<()> { - test_with_instruction_and_status_and_port(None, PipelineStatusKind::Committed, 10_250) + test_with_instruction_and_status_and_port(None, &TransactionStatus::Approved, 10_250) } #[ignore = "ignore, more in #2851"] // #[ignore = "Experiment"] #[test] fn transaction_with_fail_instruction_should_be_rejected() -> Result<()> { - let fail = Fail::new("Should be rejected".to_owned()); + let msg = "Should be rejected".to_owned(); + + let fail = Fail::new(msg.clone()); test_with_instruction_and_status_and_port( Some(fail.into()), - PipelineStatusKind::Rejected, + &TransactionStatus::Rejected(Box::new(TransactionRejectionReason::Validation( + ValidationFail::InstructionFailed(InstructionExecutionError::Fail(msg)), + ))), 10_350, ) } fn test_with_instruction_and_status_and_port( instruction: Option, - should_be: PipelineStatusKind, + should_be: &TransactionStatus, port: u16, ) -> Result<()> { let (_rt, network, client) = @@ -56,9 +68,9 @@ fn test_with_instruction_and_status_and_port( let mut handles = Vec::new(); for listener in clients { let checker = Checker { listener, hash }; - let handle_validating = checker.clone().spawn(PipelineStatusKind::Validating); + let handle_validating = checker.clone().spawn(TransactionStatus::Queued); handles.push(handle_validating); - let handle_validated = checker.spawn(should_be); + let handle_validated = checker.spawn(should_be.clone()); handles.push(handle_validated); } // When @@ -78,16 +90,13 @@ struct Checker { } impl Checker { - fn spawn(self, status_kind: PipelineStatusKind) -> JoinHandle<()> { + fn spawn(self, status_kind: TransactionStatus) -> JoinHandle<()> { thread::spawn(move || { let mut event_iterator = self .listener - .listen_for_events( - PipelineEventFilter::new() - .for_entity(PipelineEntityKind::Transaction) - .for_status(status_kind) - .for_hash(*self.hash), - ) + .listen_for_events([TransactionEventFilter::default() + .for_status(status_kind) + .for_hash(self.hash)]) .expect("Failed to create event iterator."); let event_result = event_iterator.next().expect("Stream closed"); let _event = event_result.expect("Must be valid"); @@ -96,36 +105,30 @@ impl Checker { } #[test] -fn committed_block_must_be_available_in_kura() { +fn applied_block_must_be_available_in_kura() { let (_rt, peer, client) = ::new().with_port(11_040).start_with_runtime(); wait_for_genesis_committed(&[client.clone()], 0); - let event_filter = PipelineEventFilter::new() - .for_entity(PipelineEntityKind::Block) - .for_status(PipelineStatusKind::Committed); + let event_filter = BlockEventFilter::default().for_status(BlockStatus::Applied); let mut event_iter = client - .listen_for_events(event_filter) + .listen_for_events([event_filter]) .expect("Failed to subscribe for events"); client .submit(Fail::new("Dummy instruction".to_owned())) .expect("Failed to submit transaction"); - let event = event_iter.next().expect("Block must be committed"); - let Ok(Event::Pipeline(PipelineEvent { - entity_kind: PipelineEntityKind::Block, - status: PipelineStatus::Committed, - hash, - })) = event - else { - panic!("Received unexpected event") - }; - let hash = HashOf::from_untyped_unchecked(hash); + let event: BlockEvent = event_iter + .next() + .expect("Block must be committed") + .expect("Block must be committed") + .try_into() + .expect("Received unexpected event"); peer.iroha .as_ref() .expect("Must be some") .kura - 
.get_block_height_by_hash(&hash) - .expect("Block committed event was received earlier"); + .get_block_by_height(event.header().height()) + .expect("Block applied event was received earlier"); } diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index e7fea53ac18..9a4578b8660 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -6,7 +6,9 @@ use iroha_client::{ crypto::KeyPair, data_model::prelude::*, }; -use iroha_data_model::permission::PermissionToken; +use iroha_data_model::{ + permission::PermissionToken, transaction::error::TransactionRejectionReason, +}; use iroha_genesis::GenesisNetwork; use serde_json::json; use test_network::{PeerBuilder, *}; @@ -104,14 +106,12 @@ fn permissions_disallow_asset_transfer() { .submit_transaction_blocking(&transfer_tx) .expect_err("Transaction was not rejected."); let rejection_reason = err - .downcast_ref::() - .expect("Error {err} is not PipelineRejectionReason"); + .downcast_ref::() + .expect("Error {err} is not TransactionRejectionReason"); //Then assert!(matches!( rejection_reason, - &PipelineRejectionReason::Transaction(TransactionRejectionReason::Validation( - ValidationFail::NotPermitted(_) - )) + &TransactionRejectionReason::Validation(ValidationFail::NotPermitted(_)) )); let alice_assets = get_assets(&iroha_client, &alice_id); assert_eq!(alice_assets, alice_start_assets); @@ -156,14 +156,12 @@ fn permissions_disallow_asset_burn() { .submit_transaction_blocking(&burn_tx) .expect_err("Transaction was not rejected."); let rejection_reason = err - .downcast_ref::() - .expect("Error {err} is not PipelineRejectionReason"); + .downcast_ref::() + .expect("Error {err} is not TransactionRejectionReason"); assert!(matches!( rejection_reason, - &PipelineRejectionReason::Transaction(TransactionRejectionReason::Validation( - ValidationFail::NotPermitted(_) - )) + &TransactionRejectionReason::Validation(ValidationFail::NotPermitted(_)) )); let alice_assets = get_assets(&iroha_client, &alice_id); diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index 12a03f333c1..6f260e3709f 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -6,6 +6,7 @@ use iroha_client::{ crypto::KeyPair, data_model::prelude::*, }; +use iroha_data_model::transaction::error::TransactionRejectionReason; use serde_json::json; use test_network::*; @@ -164,14 +165,12 @@ fn role_with_invalid_permissions_is_not_accepted() -> Result<()> { .expect_err("Submitting role with invalid permission token should fail"); let rejection_reason = err - .downcast_ref::() - .unwrap_or_else(|| panic!("Error {err} is not PipelineRejectionReason")); + .downcast_ref::() + .unwrap_or_else(|| panic!("Error {err} is not TransactionRejectionReason")); assert!(matches!( rejection_reason, - &PipelineRejectionReason::Transaction(TransactionRejectionReason::Validation( - ValidationFail::NotPermitted(_) - )) + &TransactionRejectionReason::Validation(ValidationFail::NotPermitted(_)) )); Ok(()) diff --git a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs index 2a725479740..8eff37089b2 100644 --- a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs +++ b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs @@ -16,7 +16,7 @@ static ALLOC: LockedAllocator = 
LockedAllocator::new(FreeList getrandom::register_custom_getrandom!(iroha_trigger::stub_getrandom); #[iroha_trigger::main] -fn main(_owner: AccountId, _event: Event) { +fn main(_owner: AccountId, _event: EventBox) { iroha_trigger::log::info!("Executing trigger"); let accounts_cursor = FindAllAccounts.execute().dbg_unwrap(); diff --git a/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs b/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs index e3558de7c61..701956c8ad8 100644 --- a/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs +++ b/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs @@ -17,7 +17,7 @@ getrandom::register_custom_getrandom!(iroha_trigger::stub_getrandom); /// Mint 1 rose for owner #[iroha_trigger::main] -fn main(owner: AccountId, _event: Event) { +fn main(owner: AccountId, _event: EventBox) { let rose_definition_id = AssetDefinitionId::from_str("rose#wonderland") .dbg_expect("Failed to parse `rose#wonderland` asset definition id"); let rose_id = AssetId::new(rose_definition_id, owner); diff --git a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs index 5028ca4e01d..feadea44447 100644 --- a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs +++ b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs @@ -26,7 +26,7 @@ fn main(owner: AccountId) { .execute() .dbg_unwrap(); - let (_batch, cursor) = asset_cursor.into_raw_parts(); + let (_batch, cursor) = asset_cursor.into_parts(); SetKeyValue::account( owner, diff --git a/client/tests/integration/triggers/by_call_trigger.rs b/client/tests/integration/triggers/by_call_trigger.rs index a2c2ac2b41d..ff76caf34f2 100644 --- a/client/tests/integration/triggers/by_call_trigger.rs +++ b/client/tests/integration/triggers/by_call_trigger.rs @@ -58,11 +58,9 @@ fn execute_trigger_should_produce_event() -> Result<()> { let thread_client = test_client.clone(); let (sender, receiver) = mpsc::channel(); let _handle = thread::spawn(move || -> Result<()> { - let mut event_it = thread_client.listen_for_events( - ExecuteTriggerEventFilter::new() - .for_trigger(trigger_id) - .under_authority(account_id), - )?; + let mut event_it = thread_client.listen_for_events([ExecuteTriggerEventFilter::new() + .for_trigger(trigger_id) + .under_authority(account_id)])?; if event_it.next().is_some() { sender.send(())?; return Ok(()); diff --git a/client/tests/integration/triggers/time_trigger.rs b/client/tests/integration/triggers/time_trigger.rs index 1f29a0d8ba9..8a9bb9fb034 100644 --- a/client/tests/integration/triggers/time_trigger.rs +++ b/client/tests/integration/triggers/time_trigger.rs @@ -6,11 +6,20 @@ use iroha_client::{ data_model::{prelude::*, transaction::WasmSmartContract}, }; use iroha_config::parameters::defaults::chain_wide::DEFAULT_CONSENSUS_ESTIMATION; +use iroha_data_model::events::pipeline::{BlockEventFilter, BlockStatus}; use iroha_logger::info; use test_network::*; use crate::integration::new_account_with_random_public_key; +fn curr_time() -> core::time::Duration { + use std::time::SystemTime; + + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time") +} + /// Macro to abort compilation, if `e` isn't `true` macro_rules! 
const_assert { ($e:expr) => { @@ -33,7 +42,7 @@ fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result let (_rt, _peer, mut test_client) = ::new().with_port(10_775).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); - let start_time = current_time(); + let start_time = curr_time(); // Start listening BEFORE submitting any transaction not to miss any block committed event let event_listener = get_block_committed_event_listener(&test_client)?; @@ -66,7 +75,7 @@ fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result )?; std::thread::sleep(DEFAULT_CONSENSUS_ESTIMATION); - let finish_time = current_time(); + let finish_time = curr_time(); let average_count = finish_time.saturating_sub(start_time).as_millis() / PERIOD.as_millis(); let actual_value = get_asset_value(&mut test_client, asset_id); @@ -92,7 +101,7 @@ fn change_asset_metadata_after_1_sec() -> Result<()> { let (_rt, _peer, mut test_client) = ::new().with_port(10_660).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); - let start_time = current_time(); + let start_time = curr_time(); // Start listening BEFORE submitting any transaction not to miss any block committed event let event_listener = get_block_committed_event_listener(&test_client)?; @@ -220,7 +229,7 @@ fn mint_nft_for_every_user_every_1_sec() -> Result<()> { let event_listener = get_block_committed_event_listener(&test_client)?; // Registering trigger - let start_time = current_time(); + let start_time = curr_time(); let schedule = TimeSchedule::starting_at(start_time).with_period(Duration::from_millis(TRIGGER_PERIOD_MS)); let register_trigger = Register::trigger(Trigger::new( @@ -272,11 +281,9 @@ fn mint_nft_for_every_user_every_1_sec() -> Result<()> { /// Get block committed event listener fn get_block_committed_event_listener( client: &Client, -) -> Result>> { - let block_filter = PipelineEventFilter::new() - .for_entity(PipelineEntityKind::Block) - .for_status(PipelineStatusKind::Committed); - client.listen_for_events(block_filter) +) -> Result>> { + let block_filter = BlockEventFilter::default().for_status(BlockStatus::Committed); + client.listen_for_events([block_filter]) } /// Get asset numeric value @@ -292,7 +299,7 @@ fn get_asset_value(client: &mut Client, asset_id: AssetId) -> Numeric { /// Submit some sample ISIs to create new blocks fn submit_sample_isi_on_every_block_commit( - block_committed_event_listener: impl Iterator>, + block_committed_event_listener: impl Iterator>, test_client: &mut Client, account_id: &AccountId, timeout: Duration, diff --git a/client_cli/src/main.rs b/client_cli/src/main.rs index 807d504a280..7a817316e57 100644 --- a/client_cli/src/main.rs +++ b/client_cli/src/main.rs @@ -249,13 +249,17 @@ mod filter { mod events { + use iroha_client::data_model::events::pipeline::{BlockEventFilter, TransactionEventFilter}; + use super::*; /// Get event stream from iroha peer #[derive(clap::Subcommand, Debug, Clone, Copy)] pub enum Args { - /// Gets pipeline events - Pipeline, + /// Gets block pipeline events + BlockPipeline, + /// Gets transaction pipeline events + TransactionPipeline, /// Gets data events Data, /// Get execute trigger events @@ -267,7 +271,8 @@ mod events { impl RunArgs for Args { fn run(self, context: &mut dyn RunContext) -> Result<()> { match self { - Args::Pipeline => listen(PipelineEventFilter::new(), context), + Args::TransactionPipeline => listen(TransactionEventFilter::default(), context), + Args::BlockPipeline => 
listen(BlockEventFilter::default(), context), Args::Data => listen(DataEventFilter::Any, context), Args::ExecuteTrigger => listen(ExecuteTriggerEventFilter::new(), context), Args::TriggerCompleted => listen(TriggerCompletedEventFilter::new(), context), @@ -280,7 +285,7 @@ mod events { let iroha_client = context.client_from_config(); eprintln!("Listening to events with filter: {filter:?}"); iroha_client - .listen_for_events(filter) + .listen_for_events([filter]) .wrap_err("Failed to listen for events.")? .try_for_each(|event| context.print_data(&event?))?; Ok(()) diff --git a/config/tests/fixtures/full.toml b/config/tests/fixtures/full.toml index ef611ea97cf..aae107333d0 100644 --- a/config/tests/fixtures/full.toml +++ b/config/tests/fixtures/full.toml @@ -57,7 +57,7 @@ min_retry_period = 5_000 max_retry_delay_exponent = 4 [dev_telemetry] -out_file = "./dev-telemetry.json" +out_file = "./dev_telemetry.json" [chain_wide] max_transactions_in_block = 512 diff --git a/configs/peer.template.toml b/configs/peer.template.toml index aa8691bcf76..2c8b88a7616 100644 --- a/configs/peer.template.toml +++ b/configs/peer.template.toml @@ -63,4 +63,4 @@ [dev_telemetry] ## A path to a file with JSON logs -# out_file = "./dev-telemetry.json" +# out_file = "./dev_telemetry.json" diff --git a/configs/swarm/executor.wasm b/configs/swarm/executor.wasm index fb05db1652e..c6700b3d1e4 100644 Binary files a/configs/swarm/executor.wasm and b/configs/swarm/executor.wasm differ diff --git a/core/benches/blocks/apply_blocks.rs b/core/benches/blocks/apply_blocks.rs index f85921695d5..bdf75e9e215 100644 --- a/core/benches/blocks/apply_blocks.rs +++ b/core/benches/blocks/apply_blocks.rs @@ -39,7 +39,7 @@ impl StateApplyBlocks { let state = build_state(rt, &account_id, &key_pair); instructions .into_iter() - .map(|instructions| -> Result<_> { + .map(|instructions| { let mut state_block = state.block(); let block = create_block( &mut state_block, @@ -47,11 +47,11 @@ impl StateApplyBlocks { account_id.clone(), &key_pair, ); - state_block.apply_without_execution(&block)?; + let _events = state_block.apply_without_execution(&block); state_block.commit(); - Ok(block) + block }) - .collect::, _>>()? + .collect::>() }; Ok(Self { state, blocks }) @@ -68,7 +68,7 @@ impl StateApplyBlocks { pub fn measure(Self { state, blocks }: &Self) -> Result<()> { for (block, i) in blocks.iter().zip(1..) 
{ let mut state_block = state.block(); - state_block.apply(block)?; + let _events = state_block.apply(block)?; assert_eq!(state_block.height(), i); state_block.commit(); } diff --git a/core/benches/blocks/common.rs b/core/benches/blocks/common.rs index e4070b458c5..d88514f7c9f 100644 --- a/core/benches/blocks/common.rs +++ b/core/benches/blocks/common.rs @@ -42,7 +42,9 @@ pub fn create_block( ) .chain(0, state) .sign(key_pair) + .unpack(|_| {}) .commit(&topology) + .unpack(|_| {}) .unwrap(); // Verify that transactions are valid diff --git a/core/benches/blocks/validate_blocks.rs b/core/benches/blocks/validate_blocks.rs index 3390d7aaebe..ac6de7fa5d5 100644 --- a/core/benches/blocks/validate_blocks.rs +++ b/core/benches/blocks/validate_blocks.rs @@ -1,4 +1,3 @@ -use eyre::Result; use iroha_core::{prelude::*, state::State}; use iroha_data_model::{isi::InstructionBox, prelude::*}; @@ -21,11 +20,11 @@ impl StateValidateBlocks { /// - Failed to parse [`AccountId`] /// - Failed to generate [`KeyPair`] /// - Failed to create instructions for block - pub fn setup(rt: &tokio::runtime::Handle) -> Result { + pub fn setup(rt: &tokio::runtime::Handle) -> Self { let domains = 100; let accounts_per_domain = 1000; let assets_per_domain = 1000; - let account_id: AccountId = "alice@wonderland".parse()?; + let account_id: AccountId = "alice@wonderland".parse().unwrap(); let key_pair = KeyPair::random(); let state = build_state(rt, &account_id, &key_pair); @@ -38,12 +37,12 @@ impl StateValidateBlocks { .into_iter() .collect::>(); - Ok(Self { + Self { state, instructions, key_pair, account_id, - }) + } } /// Run benchmark body. @@ -61,7 +60,7 @@ impl StateValidateBlocks { key_pair, account_id, }: Self, - ) -> Result<()> { + ) { for (instructions, i) in instructions.into_iter().zip(1..) 
{ let mut state_block = state.block(); let block = create_block( @@ -70,11 +69,9 @@ impl StateValidateBlocks { account_id.clone(), &key_pair, ); - state_block.apply_without_execution(&block)?; + let _events = state_block.apply_without_execution(&block); assert_eq!(state_block.height(), i); state_block.commit(); } - - Ok(()) } } diff --git a/core/benches/blocks/validate_blocks_benchmark.rs b/core/benches/blocks/validate_blocks_benchmark.rs index 454e07e3f4c..c3592b506f2 100644 --- a/core/benches/blocks/validate_blocks_benchmark.rs +++ b/core/benches/blocks/validate_blocks_benchmark.rs @@ -15,10 +15,8 @@ fn validate_blocks(c: &mut Criterion) { group.significance_level(0.1).sample_size(10); group.bench_function("validate_blocks", |b| { b.iter_batched( - || StateValidateBlocks::setup(rt.handle()).expect("Failed to setup benchmark"), - |bench| { - StateValidateBlocks::measure(bench).expect("Failed to execute benchmark"); - }, + || StateValidateBlocks::setup(rt.handle()), + StateValidateBlocks::measure, criterion::BatchSize::SmallInput, ); }); diff --git a/core/benches/blocks/validate_blocks_oneshot.rs b/core/benches/blocks/validate_blocks_oneshot.rs index 118ce739b99..8c8b20b1343 100644 --- a/core/benches/blocks/validate_blocks_oneshot.rs +++ b/core/benches/blocks/validate_blocks_oneshot.rs @@ -20,6 +20,6 @@ fn main() { } iroha_logger::test_logger(); iroha_logger::info!("Starting..."); - let bench = StateValidateBlocks::setup(rt.handle()).expect("Failed to setup benchmark"); - StateValidateBlocks::measure(bench).expect("Failed to execute bnechmark"); + let bench = StateValidateBlocks::setup(rt.handle()); + StateValidateBlocks::measure(bench); } diff --git a/core/benches/kura.rs b/core/benches/kura.rs index 06f78dcfc9b..521e242f60e 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -56,6 +56,7 @@ async fn measure_block_size_for_n_executors(n_executors: u32) { BlockBuilder::new(vec![tx], topology, Vec::new()) .chain(0, &mut state_block) .sign(&KeyPair::random()) + .unpack(|_| {}) }; for _ in 1..n_executors { diff --git a/core/benches/validation.rs b/core/benches/validation.rs index 8aff8c01ce0..d7e5459f090 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -186,7 +186,7 @@ fn sign_blocks(criterion: &mut Criterion) { b.iter_batched( || block.clone(), |block| { - let _: ValidBlock = block.sign(&key_pair); + let _: ValidBlock = block.sign(&key_pair).unpack(|_| {}); count += 1; }, BatchSize::SmallInput, diff --git a/core/src/block.rs b/core/src/block.rs index 4a6f210502e..c7b66b0f718 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -18,6 +18,7 @@ use iroha_genesis::GenesisTransaction; use iroha_primitives::unique_vec::UniqueVec; use thiserror::Error; +pub(crate) use self::event::WithEvents; pub use self::{chained::Chained, commit::CommittedBlock, valid::ValidBlock}; use crate::{prelude::*, sumeragi::network_topology::Topology, tx::AcceptTransactionFail}; @@ -51,6 +52,13 @@ pub enum BlockValidationError { /// Actual value actual: u64, }, + /// Mismatch between the actual and expected hashes of the current block. 
Expected: {expected:?}, actual: {actual:?}
+    IncorrectHash {
+        /// Expected value
+        expected: HashOf<SignedBlock>,
+        /// Actual value
+        actual: HashOf<SignedBlock>,
+    },
     /// The transaction hash stored in the block header does not match the actual transaction hash
     TransactionHashMismatch,
     /// Error during transaction validation
@@ -93,6 +101,8 @@ pub enum SignatureVerificationError {
 pub struct BlockBuilder<B>(B);
 
 mod pending {
+    use std::time::SystemTime;
+
     use iroha_data_model::transaction::TransactionValue;
 
     use super::*;
@@ -110,7 +120,7 @@ mod pending {
         /// Transaction will be validated when block is chained.
         transactions: Vec<AcceptedTransaction>,
         /// Event recommendations for use in triggers and off-chain work
-        event_recommendations: Vec<Event>,
+        event_recommendations: Vec<EventBox>,
     }
 
     impl BlockBuilder<Pending> {
@@ -123,7 +133,7 @@ mod pending {
         pub fn new(
             transactions: Vec<AcceptedTransaction>,
             commit_topology: Topology,
-            event_recommendations: Vec<Event>,
+            event_recommendations: Vec<EventBox>,
         ) -> Self {
             assert!(!transactions.is_empty(), "Empty block created");
 
         fn make_header(
             previous_height: u64,
-            previous_block_hash: Option<HashOf<SignedBlock>>,
+            prev_block_hash: Option<HashOf<SignedBlock>>,
             view_change_index: u64,
             transactions: &[TransactionValue],
         ) -> BlockHeader {
             BlockHeader {
-                timestamp_ms: iroha_data_model::current_time()
+                height: previous_height + 1,
+                previous_block_hash: prev_block_hash,
+                transactions_hash: transactions
+                    .iter()
+                    .map(|value| value.as_ref().hash())
+                    .collect::<MerkleTree<_>>()
+                    .hash(),
+                timestamp_ms: SystemTime::now()
+                    .duration_since(SystemTime::UNIX_EPOCH)
+                    .expect("Failed to get the current system time")
                     .as_millis()
                     .try_into()
                     .expect("Time should fit into u64"),
+                view_change_index,
                 consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION
                     .as_millis()
                     .try_into()
                     .expect("Time should fit into u64"),
-                height: previous_height + 1,
-                view_change_index,
-                previous_block_hash,
-                transactions_hash: transactions
-                    .iter()
-                    .map(|value| value.as_ref().hash())
-                    .collect::<MerkleTree<_>>()
-                    .hash(),
             }
         }
@@ -222,16 +234,16 @@ mod chained {
     impl BlockBuilder<Chained> {
         /// Sign this block and get [`SignedBlock`].
-        pub fn sign(self, key_pair: &KeyPair) -> ValidBlock {
+        pub fn sign(self, key_pair: &KeyPair) -> WithEvents<ValidBlock> {
             let signature = SignatureOf::new(key_pair, &self.0 .0);
 
-            ValidBlock(
+            WithEvents::new(ValidBlock(
                 SignedBlockV1 {
                     payload: self.0 .0,
                     signatures: SignaturesOf::from(signature),
                 }
                 .into(),
-            )
+            ))
         }
     }
 }
@@ -245,7 +257,7 @@ mod valid {
     /// Block that was validated and accepted
     #[derive(Debug, Clone)]
     #[repr(transparent)]
-    pub struct ValidBlock(pub(crate) SignedBlock);
+    pub struct ValidBlock(pub(super) SignedBlock);
 
     impl ValidBlock {
         /// Validate a block against the current state of the world.
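> Note: as the `sign` hunk above shows, block construction now returns a `WithEvents` wrapper, so callers must drain the produced pipeline events before they can use the block itself. A sketch of the resulting calling convention, with module paths and bindings assumed from the surrounding hunks (the tests in this PR simply drop the events with `.unpack(|_| {})` instead of forwarding them):

```rust
use iroha_core::{
    block::{BlockBuilder, ValidBlock},
    prelude::*,
    state::StateBlock,
    sumeragi::network_topology::Topology,
    EventsSender,
};
use iroha_crypto::KeyPair;

// Sketch: build, sign, and drain events in one pass. `EventsSender` is the
// broadcast channel of `EventBox` defined in `core/src/lib.rs`.
fn build_and_sign(
    transactions: Vec<AcceptedTransaction>,
    topology: Topology,
    state_block: &mut StateBlock<'_>,
    key_pair: &KeyPair,
    events_sender: &EventsSender,
) -> ValidBlock {
    BlockBuilder::new(transactions, topology, Vec::new())
        .chain(0, state_block)
        .sign(key_pair)
        // Every produced `PipelineEventBox` is forwarded to subscribers here.
        .unpack(|event| {
            let _ = events_sender.send(event.into());
        })
}
```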
@@ -264,7 +276,7 @@ mod valid { topology: &Topology, expected_chain_id: &ChainId, state_block: &mut StateBlock<'_>, - ) -> Result { + ) -> WithEvents> { if !block.header().is_genesis() { let actual_commit_topology = block.commit_topology(); let expected_commit_topology = &topology.ordered_peers; @@ -272,20 +284,23 @@ mod valid { if actual_commit_topology != expected_commit_topology { let actual_commit_topology = actual_commit_topology.clone(); - return Err(( + return WithEvents::new(Err(( block, BlockValidationError::TopologyMismatch { expected: expected_commit_topology.clone(), actual: actual_commit_topology, }, - )); + ))); } if topology .filter_signatures_by_roles(&[Role::Leader], block.signatures()) .is_empty() { - return Err((block, SignatureVerificationError::LeaderMissing.into())); + return WithEvents::new(Err(( + block, + SignatureVerificationError::LeaderMissing.into(), + ))); } } @@ -293,48 +308,51 @@ mod valid { let actual_height = block.header().height; if expected_block_height != actual_height { - return Err(( + return WithEvents::new(Err(( block, BlockValidationError::LatestBlockHeightMismatch { expected: expected_block_height, actual: actual_height, }, - )); + ))); } - let expected_previous_block_hash = state_block.latest_block_hash(); - let actual_block_hash = block.header().previous_block_hash; + let expected_prev_block_hash = state_block.latest_block_hash(); + let actual_prev_block_hash = block.header().previous_block_hash; - if expected_previous_block_hash != actual_block_hash { - return Err(( + if expected_prev_block_hash != actual_prev_block_hash { + return WithEvents::new(Err(( block, BlockValidationError::LatestBlockHashMismatch { - expected: expected_previous_block_hash, - actual: actual_block_hash, + expected: expected_prev_block_hash, + actual: actual_prev_block_hash, }, - )); + ))); } if block .transactions() .any(|tx| state_block.has_transaction(tx.as_ref().hash())) { - return Err((block, BlockValidationError::HasCommittedTransactions)); + return WithEvents::new(Err(( + block, + BlockValidationError::HasCommittedTransactions, + ))); } if let Err(error) = Self::validate_transactions(&block, expected_chain_id, state_block) { - return Err((block, error.into())); + return WithEvents::new(Err((block, error.into()))); } let SignedBlock::V1(block) = block; - Ok(ValidBlock( + WithEvents::new(Ok(ValidBlock( SignedBlockV1 { payload: block.payload, signatures: block.signatures, } .into(), - )) + ))) } fn validate_transactions( @@ -379,24 +397,44 @@ mod valid { /// /// - Not enough signatures /// - Not signed by proxy tail - pub(crate) fn commit_with_signatures( + pub fn commit_with_signatures( mut self, topology: &Topology, signatures: SignaturesOf, - ) -> Result { + expected_hash: HashOf, + ) -> WithEvents> { if topology .filter_signatures_by_roles(&[Role::Leader], &signatures) .is_empty() { - return Err((self, SignatureVerificationError::LeaderMissing.into())); + return WithEvents::new(Err(( + self, + SignatureVerificationError::LeaderMissing.into(), + ))); } if !self.as_ref().signatures().is_subset(&signatures) { - return Err((self, SignatureVerificationError::SignatureMissing.into())); + return WithEvents::new(Err(( + self, + SignatureVerificationError::SignatureMissing.into(), + ))); } if !self.0.replace_signatures(signatures) { - return Err((self, SignatureVerificationError::UnknownSignature.into())); + return WithEvents::new(Err(( + self, + SignatureVerificationError::UnknownSignature.into(), + ))); + } + + let actual_block_hash = self.as_ref().hash(); + if 
actual_block_hash != expected_hash {
+            let err = BlockValidationError::IncorrectHash {
+                expected: expected_hash,
+                actual: actual_block_hash,
+            };
+
+            return WithEvents::new(Err((self, err)));
         }
 
         self.commit(topology)
@@ -411,19 +449,19 @@ mod valid {
         pub fn commit(
             self,
             topology: &Topology,
-        ) -> Result<CommittedBlock, (ValidBlock, BlockValidationError)> {
+        ) -> WithEvents<Result<CommittedBlock, (ValidBlock, BlockValidationError)>> {
             if !self.0.header().is_genesis() {
                 if let Err(err) = self.verify_signatures(topology) {
-                    return Err((self, err.into()));
+                    return WithEvents::new(Err((self, err.into())));
                 }
             }
 
-            Ok(CommittedBlock(self))
+            WithEvents::new(Ok(CommittedBlock(self)))
         }
 
         /// Add additional signatures for [`Self`].
         #[must_use]
-        pub fn sign(self, key_pair: &KeyPair) -> Self {
+        pub fn sign(self, key_pair: &KeyPair) -> ValidBlock {
             ValidBlock(self.0.sign(key_pair))
         }
 
@@ -443,21 +481,22 @@ mod valid {
         pub(crate) fn new_dummy() -> Self {
             BlockBuilder(Chained(BlockPayload {
                 header: BlockHeader {
+                    height: 2,
+                    previous_block_hash: None,
+                    transactions_hash: None,
                     timestamp_ms: 0,
+                    view_change_index: 0,
                     consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION
                         .as_millis()
                         .try_into()
-                        .expect("Should never overflow?"),
-                    height: 2,
-                    view_change_index: 0,
-                    previous_block_hash: None,
-                    transactions_hash: None,
+                        .expect("Time should fit into u64"),
                 },
                 transactions: Vec::new(),
                 commit_topology: UniqueVec::new(),
                 event_recommendations: Vec::new(),
             }))
             .sign(&KeyPair::random())
+            .unpack(|_| {})
         }
 
         /// Check if block's signatures meet requirements for given topology.
@@ -628,31 +667,7 @@ mod commit {
     /// Represents a block accepted by consensus.
     /// Every [`Self`] will have a different height.
     #[derive(Debug, Clone)]
-    pub struct CommittedBlock(pub(crate) ValidBlock);
-
-    impl CommittedBlock {
-        pub(crate) fn produce_events(&self) -> Vec<PipelineEvent> {
-            let tx = self.as_ref().transactions().map(|tx| {
-                let status = tx.error.as_ref().map_or_else(
-                    || PipelineStatus::Committed,
-                    |error| PipelineStatus::Rejected(error.clone().into()),
-                );
-
-                PipelineEvent {
-                    entity_kind: PipelineEntityKind::Transaction,
-                    status,
-                    hash: tx.as_ref().hash().into(),
-                }
-            });
-            let current_block = core::iter::once(PipelineEvent {
-                entity_kind: PipelineEntityKind::Block,
-                status: PipelineStatus::Committed,
-                hash: self.as_ref().hash().into(),
-            });
-
-            tx.chain(current_block).collect()
-        }
-    }
+    pub struct CommittedBlock(pub(super) ValidBlock);
 
     impl From<CommittedBlock> for ValidBlock {
         fn from(source: CommittedBlock) -> Self {
@@ -666,12 +681,105 @@ mod commit {
         }
     }
 
-    // Invariants of [`CommittedBlock`] can't be violated through immutable reference
     impl AsRef<SignedBlock> for CommittedBlock {
         fn as_ref(&self) -> &SignedBlock {
             &self.0 .0
         }
     }
+
+    #[cfg(test)]
+    impl AsMut<SignedBlock> for CommittedBlock {
+        fn as_mut(&mut self) -> &mut SignedBlock {
+            &mut self.0 .0
+        }
+    }
+}
+
+mod event {
+    use super::*;
+
+    pub trait EventProducer {
+        fn produce_events(&self) -> impl Iterator<Item = PipelineEventBox>;
+    }
+
+    #[derive(Debug)]
+    #[must_use]
+    pub struct WithEvents<B>(B);
+
+    impl<B> WithEvents<B> {
+        pub(super) fn new(source: B) -> Self {
+            Self(source)
+        }
+    }
+
+    impl<B: EventProducer, B1, E: EventProducer> WithEvents<Result<B, (B1, E)>> {
+        pub fn unpack<F: Fn(PipelineEventBox)>(self, f: F) -> Result<B, (B1, E)> {
+            match self.0 {
+                Ok(ok) => Ok(WithEvents(ok).unpack(f)),
+                Err(err) => Err(WithEvents(err).unpack(f)),
+            }
+        }
+    }
+    impl<B: EventProducer> WithEvents<B> {
+        pub fn unpack<F: Fn(PipelineEventBox)>(self, f: F) -> B {
+            self.0.produce_events().for_each(f);
+            self.0
+        }
+    }
+
+    impl<B, E: EventProducer> WithEvents<(B, E)> {
+        pub(crate) fn unpack<F: Fn(PipelineEventBox)>(self, f: F) -> (B, E) {
+            self.0 .1.produce_events().for_each(f);
+            self.0
+        }
+    }
+
+    impl EventProducer for ValidBlock {
+        fn produce_events(&self) -> impl Iterator<Item = PipelineEventBox> {
+            let block_height =
self.as_ref().header().height; + + let tx_events = self.as_ref().transactions().map(move |tx| { + let status = tx.error.as_ref().map_or_else( + || TransactionStatus::Approved, + |error| TransactionStatus::Rejected(error.clone().into()), + ); + + TransactionEvent { + block_height: Some(block_height), + hash: tx.as_ref().hash(), + status, + } + }); + + let block_event = core::iter::once(BlockEvent { + header: self.as_ref().header().clone(), + hash: self.as_ref().hash(), + status: BlockStatus::Approved, + }); + + tx_events + .map(PipelineEventBox::from) + .chain(block_event.map(Into::into)) + } + } + + impl EventProducer for CommittedBlock { + fn produce_events(&self) -> impl Iterator { + let block_event = core::iter::once(BlockEvent { + header: self.as_ref().header().clone(), + hash: self.as_ref().hash(), + status: BlockStatus::Committed, + }); + + block_event.map(Into::into) + } + } + + impl EventProducer for BlockValidationError { + fn produce_events(&self) -> impl Iterator { + core::iter::empty() + } + } } #[cfg(test)] @@ -690,12 +798,13 @@ mod tests { pub fn committed_and_valid_block_hashes_are_equal() { let valid_block = ValidBlock::new_dummy(); let topology = Topology::new(UniqueVec::new()); - let committed_block = valid_block.clone().commit(&topology).unwrap(); + let committed_block = valid_block + .clone() + .commit(&topology) + .unpack(|_| {}) + .unwrap(); - assert_eq!( - valid_block.0.hash_of_payload(), - committed_block.as_ref().hash_of_payload() - ) + assert_eq!(valid_block.0.hash(), committed_block.as_ref().hash()) } #[tokio::test] @@ -733,13 +842,26 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut state_block) - .sign(&alice_keys); + .sign(&alice_keys) + .unpack(|_| {}); // The first transaction should be confirmed - assert!(valid_block.0.transactions().next().unwrap().error.is_none()); + assert!(valid_block + .as_ref() + .transactions() + .next() + .unwrap() + .error + .is_none()); // The second transaction should be rejected - assert!(valid_block.0.transactions().nth(1).unwrap().error.is_some()); + assert!(valid_block + .as_ref() + .transactions() + .nth(1) + .unwrap() + .error + .is_some()); } #[tokio::test] @@ -795,13 +917,26 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut state_block) - .sign(&alice_keys); + .sign(&alice_keys) + .unpack(|_| {}); // The first transaction should fail - assert!(valid_block.0.transactions().next().unwrap().error.is_some()); + assert!(valid_block + .as_ref() + .transactions() + .next() + .unwrap() + .error + .is_some()); // The third transaction should succeed - assert!(valid_block.0.transactions().nth(2).unwrap().error.is_none()); + assert!(valid_block + .as_ref() + .transactions() + .nth(2) + .unwrap() + .error + .is_none()); } #[tokio::test] @@ -852,17 +987,30 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut state_block) - .sign(&alice_keys); + .sign(&alice_keys) + .unpack(|_| {}); // The first transaction should be rejected assert!( - valid_block.0.transactions().next().unwrap().error.is_some(), + valid_block + .as_ref() + .transactions() + .next() + .unwrap() + .error + .is_some(), "The first transaction should be rejected, as it contains `Fail`." 
); // The second transaction should be accepted assert!( - valid_block.0.transactions().nth(1).unwrap().error.is_none(), + valid_block + .as_ref() + .transactions() + .nth(1) + .unwrap() + .error + .is_none(), "The second transaction should be accepted." ); } diff --git a/core/src/block_sync.rs b/core/src/block_sync.rs index d2e5c6b7219..ef7f5b8c10a 100644 --- a/core/src/block_sync.rs +++ b/core/src/block_sync.rs @@ -91,16 +91,13 @@ impl BlockSynchronizer { /// Sends request for latest blocks to a chosen peer async fn request_latest_blocks_from_peer(&mut self, peer_id: PeerId) { - let (previous_hash, latest_hash) = { + let (prev_hash, latest_hash) = { let state_view = self.state.view(); - ( - state_view.previous_block_hash(), - state_view.latest_block_hash(), - ) + (state_view.prev_block_hash(), state_view.latest_block_hash()) }; message::Message::GetBlocksAfter(message::GetBlocksAfter::new( latest_hash, - previous_hash, + prev_hash, self.peer_id.clone(), )) .send_to(&self.network, peer_id) @@ -138,7 +135,7 @@ pub mod message { /// Hash of latest available block pub latest_hash: Option>, /// Hash of second to latest block - pub previous_hash: Option>, + pub prev_hash: Option>, /// Peer id pub peer_id: PeerId, } @@ -147,12 +144,12 @@ pub mod message { /// Construct [`GetBlocksAfter`]. pub const fn new( latest_hash: Option>, - previous_hash: Option>, + prev_hash: Option>, peer_id: PeerId, ) -> Self { Self { latest_hash, - previous_hash, + prev_hash, peer_id, } } @@ -190,21 +187,21 @@ pub mod message { match self { Message::GetBlocksAfter(GetBlocksAfter { latest_hash, - previous_hash, + prev_hash, peer_id, }) => { let local_latest_block_hash = block_sync.state.view().latest_block_hash(); if *latest_hash == local_latest_block_hash - || *previous_hash == local_latest_block_hash + || *prev_hash == local_latest_block_hash { return; } - let start_height = match previous_hash { + let start_height = match prev_hash { Some(hash) => match block_sync.kura.get_block_height_by_hash(hash) { None => { - error!(?previous_hash, "Block hash not found"); + error!(?prev_hash, "Block hash not found"); return; } Some(height) => height + 1, // It's get blocks *after*, so we add 1. @@ -223,9 +220,9 @@ pub mod message { // The only case where the blocks array could be empty is if we got queried for blocks // after the latest hash. There is a check earlier in the function that returns early // so it should not be possible for us to get here. - error!(hash=?previous_hash, "Blocks array is empty but shouldn't be."); + error!(hash=?prev_hash, "Blocks array is empty but shouldn't be."); } else { - trace!(hash=?previous_hash, "Sharing blocks after hash"); + trace!(hash=?prev_hash, "Sharing blocks after hash"); Message::ShareBlocks(ShareBlocks::new(blocks, block_sync.peer_id.clone())) .send_to(&block_sync.network, peer_id.clone()) .await; diff --git a/core/src/kura.rs b/core/src/kura.rs index 3dc536f9c2d..69e1cdcbecd 100644 --- a/core/src/kura.rs +++ b/core/src/kura.rs @@ -154,7 +154,7 @@ impl Kura { let mut block_indices = vec![BlockIndex::default(); block_index_count]; block_store.read_block_indices(0, &mut block_indices)?; - let mut previous_block_hash = None; + let mut prev_block_hash = None; for block in block_indices { // This is re-allocated every iteration. This could cause a problem. 
             let mut block_data_buffer = vec![0_u8; block.length.try_into()?];
 
             match block_store.read_block_data(block.start, &mut block_data_buffer) {
                 Ok(()) => match SignedBlock::decode_all_versioned(&block_data_buffer) {
                     Ok(decoded_block) => {
-                        if previous_block_hash != decoded_block.header().previous_block_hash {
+                        if prev_block_hash != decoded_block.header().previous_block_hash {
                             error!("Block has wrong previous block hash. Not reading any blocks beyond this height.");
                             break;
                         }
                         let decoded_block_hash = decoded_block.hash();
                         block_hashes.push(decoded_block_hash);
-                        previous_block_hash = Some(decoded_block_hash);
+                        prev_block_hash = Some(decoded_block_hash);
                     }
                     Err(error) => {
                         error!(?error, "Encountered malformed block. Not reading any blocks beyond this height.");
diff --git a/core/src/lib.rs b/core/src/lib.rs
index ab0b9be0d6b..06a0bd4103f 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -18,7 +18,7 @@ use core::time::Duration;
 
 use gossiper::TransactionGossip;
 use indexmap::IndexSet;
-use iroha_data_model::prelude::*;
+use iroha_data_model::{events::EventBox, prelude::*};
 use iroha_primitives::unique_vec::UniqueVec;
 use parity_scale_codec::{Decode, Encode};
 use tokio::sync::broadcast;
@@ -41,8 +41,8 @@ pub type PeersIds = UniqueVec<PeerId>;
 /// Parameters set.
 pub type Parameters = IndexSet<Parameter>;
 
-/// Type of `Sender<Event>` which should be used for channels of `Event` messages.
-pub type EventsSender = broadcast::Sender<Event>;
+/// Type of `Sender<EventBox>` which should be used for channels of `Event` messages.
+pub type EventsSender = broadcast::Sender<EventBox>;
 
 /// The network message
 #[derive(Clone, Debug, Encode, Decode)]
diff --git a/core/src/queue.rs b/core/src/queue.rs
index d463a655a4c..d5ab05b54b7 100644
--- a/core/src/queue.rs
+++ b/core/src/queue.rs
@@ -1,6 +1,6 @@
 //! Module with queue actor
 use core::time::Duration;
-use std::num::NonZeroUsize;
+use std::{num::NonZeroUsize, time::SystemTime};
 
 use crossbeam_queue::ArrayQueue;
 use dashmap::{mapref::entry::Entry, DashMap};
@@ -8,17 +8,21 @@ use eyre::Result;
 use indexmap::IndexSet;
 use iroha_config::parameters::actual::Queue as Config;
 use iroha_crypto::HashOf;
-use iroha_data_model::{account::AccountId, transaction::prelude::*};
+use iroha_data_model::{
+    account::AccountId,
+    events::pipeline::{TransactionEvent, TransactionStatus},
+    transaction::prelude::*,
+};
 use iroha_logger::{trace, warn};
-use iroha_primitives::must_use::MustUse;
 use rand::seq::IteratorRandom;
 use thiserror::Error;
 
-use crate::prelude::*;
+use crate::{prelude::*, EventsSender};
 
 impl AcceptedTransaction {
     // TODO: We should have another type of transaction like `CheckedTransaction` in the type system?
-    fn check_signature_condition(&self, state_view: &StateView<'_>) -> MustUse<bool> {
+    #[must_use]
+    fn check_signature_condition(&self, state_view: &StateView<'_>) -> bool {
         let authority = self.as_ref().authority();
 
         let transaction_signatories = self
@@ -34,7 +38,7 @@ impl AcceptedTransaction {
             .map_account(authority, |account| {
                 account.check_signature_check_condition(&transaction_signatories)
             })
-            .unwrap_or(MustUse(false))
+            .unwrap_or(false)
     }
 
     /// Check if [`self`] is committed or rejected.
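> Note: with `EventsSender` now carrying `EventBox`, the queue hunks below take the sender at construction time so the queue can publish transaction lifecycle events itself. A sketch of the wiring, mirroring the `Queue::from_config` call shown earlier in `cli/src/lib.rs` (the channel capacity is an assumption for illustration, not a value from this PR):

```rust
use std::sync::Arc;

use iroha_config::parameters::actual::Queue as QueueConfig;
use iroha_core::{queue::Queue, EventsSender};

// Sketch: one broadcast channel feeds every event subscriber and the queue.
fn wire_queue(queue_config: QueueConfig) -> (EventsSender, Arc<Queue>) {
    // Capacity 10_000 is an assumed value, not taken from this changeset.
    let (events_sender, _events_receiver) = tokio::sync::broadcast::channel(10_000);
    let queue = Arc::new(Queue::from_config(queue_config, events_sender.clone()));
    (events_sender, queue)
}
```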
@@ -48,6 +52,7 @@ impl AcceptedTransaction { /// Multiple producers, single consumer #[derive(Debug)] pub struct Queue { + events_sender: EventsSender, /// The queue for transactions tx_hashes: ArrayQueue>, /// [`AcceptedTransaction`]s addressed by `Hash` @@ -96,8 +101,9 @@ pub struct Failure { impl Queue { /// Makes queue from configuration - pub fn from_config(cfg: Config) -> Self { + pub fn from_config(cfg: Config, events_sender: EventsSender) -> Self { Self { + events_sender, tx_hashes: ArrayQueue::new(cfg.capacity.get()), accepted_txs: DashMap::new(), txs_per_user: DashMap::new(), @@ -121,13 +127,19 @@ impl Queue { |tx_time_to_live| core::cmp::min(self.tx_time_to_live, tx_time_to_live), ); - iroha_data_model::current_time().saturating_sub(tx_creation_time) > time_limit + let curr_time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time"); + curr_time.saturating_sub(tx_creation_time) > time_limit } /// If `true`, this transaction is regarded to have been tampered to have a future timestamp. fn is_in_future(&self, tx: &AcceptedTransaction) -> bool { let tx_timestamp = tx.as_ref().creation_time(); - tx_timestamp.saturating_sub(iroha_data_model::current_time()) > self.future_threshold + let curr_time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time"); + tx_timestamp.saturating_sub(curr_time) > self.future_threshold } /// Returns all pending transactions. @@ -167,7 +179,7 @@ impl Queue { Err(Error::Expired) } else if tx.is_in_blockchain(state_view) { Err(Error::InBlockchain) - } else if !tx.check_signature_condition(state_view).into_inner() { + } else if !tx.check_signature_condition(state_view) { Err(Error::SignatureCondition) } else { Ok(()) @@ -226,6 +238,14 @@ impl Queue { err: Error::Full, } })?; + let _ = self.events_sender.send( + TransactionEvent { + hash, + block_height: None, + status: TransactionStatus::Queued, + } + .into(), + ); trace!("Transaction queue length = {}", self.tx_hashes.len(),); Ok(()) } @@ -281,12 +301,7 @@ impl Queue { max_txs_in_block: usize, ) -> Vec { let mut transactions = Vec::with_capacity(max_txs_in_block); - self.get_transactions_for_block( - state_view, - max_txs_in_block, - &mut transactions, - &mut Vec::new(), - ); + self.get_transactions_for_block(state_view, max_txs_in_block, &mut transactions); transactions } @@ -298,17 +313,16 @@ impl Queue { state_view: &StateView, max_txs_in_block: usize, transactions: &mut Vec, - expired_transactions: &mut Vec, ) { if transactions.len() >= max_txs_in_block { return; } let mut seen_queue = Vec::new(); - let mut expired_transactions_queue = Vec::new(); + let mut expired_transactions = Vec::new(); let txs_from_queue = core::iter::from_fn(|| { - self.pop_from_queue(&mut seen_queue, state_view, &mut expired_transactions_queue) + self.pop_from_queue(&mut seen_queue, state_view, &mut expired_transactions) }); let transactions_hashes: IndexSet> = @@ -322,7 +336,17 @@ impl Queue { .into_iter() .try_for_each(|hash| self.tx_hashes.push(hash)) .expect("Exceeded the number of transactions pending"); - expired_transactions.extend(expired_transactions_queue); + + expired_transactions + .into_iter() + .map(|tx| TransactionEvent { + hash: tx.as_ref().hash(), + block_height: None, + status: TransactionStatus::Expired, + }) + .for_each(|e| { + let _ = self.events_sender.send(e.into()); + }); } /// Check that the user adhered to the maximum transaction per user limit and increment their transaction count. 
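> Note: because the queue now reports `Queued` and `Expired` itself (see the `push` and `get_transactions_for_block` hunks above), a single broadcast receiver can observe a transaction's whole lifecycle. A sketch of the consuming side, assuming a receiver obtained via `events_sender.subscribe()`:

```rust
use iroha_data_model::events::{
    pipeline::{PipelineEventBox, TransactionStatus},
    EventBox,
};

// Sketch: trace transaction lifecycle events arriving on the shared channel.
async fn trace_transactions(mut receiver: tokio::sync::broadcast::Receiver<EventBox>) {
    while let Ok(event) = receiver.recv().await {
        if let EventBox::Pipeline(PipelineEventBox::Transaction(tx_event)) = event {
            match tx_event.status() {
                TransactionStatus::Queued => println!("queued: {:?}", tx_event.hash),
                TransactionStatus::Approved => println!("approved: {:?}", tx_event.hash),
                TransactionStatus::Expired => println!("expired: {:?}", tx_event.hash),
                TransactionStatus::Rejected(reason) => {
                    println!("rejected: {:?}: {reason:?}", tx_event.hash)
                }
            }
        }
    }
}
```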
@@ -368,7 +392,6 @@ pub mod tests { use std::{str::FromStr, sync::Arc, thread, time::Duration}; use iroha_data_model::{prelude::*, transaction::TransactionLimits}; - use iroha_primitives::must_use::MustUse; use rand::Rng as _; use tokio::test; @@ -381,6 +404,21 @@ pub mod tests { PeersIds, }; + impl Queue { + pub fn test(cfg: Config) -> Self { + Self { + events_sender: tokio::sync::broadcast::Sender::new(1), + tx_hashes: ArrayQueue::new(cfg.capacity.get()), + accepted_txs: DashMap::new(), + txs_per_user: DashMap::new(), + capacity: cfg.capacity, + capacity_per_user: cfg.capacity_per_user, + tx_time_to_live: cfg.transaction_time_to_live, + future_threshold: cfg.future_threshold, + } + } + } + fn accepted_tx(account_id: &str, key: &KeyPair) -> AcceptedTransaction { let chain_id = ChainId::from("0"); @@ -437,7 +475,7 @@ pub mod tests { )); let state_view = state.view(); - let queue = Queue::from_config(config_factory()); + let queue = Queue::test(config_factory()); queue .push(accepted_tx("alice@wonderland", &key_pair), &state_view) @@ -458,7 +496,7 @@ pub mod tests { )); let state_view = state.view(); - let queue = Queue::from_config(Config { + let queue = Queue::test(Config { transaction_time_to_live: Duration::from_secs(100), capacity, ..Config::default() @@ -504,7 +542,7 @@ pub mod tests { }; let state_view = state.view(); - let queue = Queue::from_config(config_factory()); + let queue = Queue::test(config_factory()); let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), "alice@wonderland".parse().expect("Valid")) @@ -524,7 +562,7 @@ pub mod tests { // Check that fully signed transaction passes signature check assert!(matches!( fully_signed_tx.check_signature_condition(&state_view), - MustUse(true) + true )); let get_tx = |key_pair| { @@ -534,10 +572,7 @@ pub mod tests { for key_pair in key_pairs { let partially_signed_tx: AcceptedTransaction = get_tx(key_pair); // Check that none of partially signed txs passes signature check - assert_eq!( - partially_signed_tx.check_signature_condition(&state_view), - MustUse(false) - ); + assert!(!partially_signed_tx.check_signature_condition(&state_view),); assert!(matches!( queue .push(partially_signed_tx, &state_view) @@ -560,7 +595,7 @@ pub mod tests { query_handle, )); let state_view = state.view(); - let queue = Queue::from_config(Config { + let queue = Queue::test(Config { transaction_time_to_live: Duration::from_secs(100), ..config_factory() }); @@ -590,7 +625,7 @@ pub mod tests { state_block.transactions.insert(tx.as_ref().hash(), 1); state_block.commit(); let state_view = state.view(); - let queue = Queue::from_config(config_factory()); + let queue = Queue::test(config_factory()); assert!(matches!( queue.push(tx, &state_view), Err(Failure { @@ -613,7 +648,7 @@ pub mod tests { query_handle, ); let tx = accepted_tx("alice@wonderland", &alice_key); - let queue = Queue::from_config(config_factory()); + let queue = Queue::test(config_factory()); queue.push(tx.clone(), &state.view()).unwrap(); let mut state_block = state.block(); state_block.transactions.insert(tx.as_ref().hash(), 1); @@ -639,7 +674,7 @@ pub mod tests { query_handle, )); let state_view = state.view(); - let queue = Queue::from_config(Config { + let queue = Queue::test(Config { transaction_time_to_live: Duration::from_millis(300), ..config_factory() }); @@ -687,7 +722,7 @@ pub mod tests { query_handle, )); let state_view = state.view(); - let queue = Queue::from_config(config_factory()); + let queue = Queue::test(config_factory()); queue 
.push(accepted_tx("alice@wonderland", &alice_key), &state_view) .expect("Failed to push tx into queue"); @@ -722,7 +757,9 @@ pub mod tests { query_handle, )); let state_view = state.view(); - let queue = Queue::from_config(config_factory()); + let mut queue = Queue::test(config_factory()); + let (event_sender, mut event_receiver) = tokio::sync::broadcast::channel(1); + queue.events_sender = event_sender; let instructions = [Fail { message: "expired".to_owned(), }]; @@ -737,18 +774,39 @@ pub mod tests { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; + let tx_hash = tx.hash(); let tx = AcceptedTransaction::accept(tx, &chain_id, &limits) .expect("Failed to accept Transaction."); queue .push(tx.clone(), &state_view) .expect("Failed to push tx into queue"); + let queued_tx_event = event_receiver.recv().await.unwrap(); + + assert_eq!( + queued_tx_event, + TransactionEvent { + hash: tx_hash, + block_height: None, + status: TransactionStatus::Queued, + } + .into() + ); + let mut txs = Vec::new(); - let mut expired_txs = Vec::new(); thread::sleep(Duration::from_millis(TTL_MS)); - queue.get_transactions_for_block(&state_view, max_txs_in_block, &mut txs, &mut expired_txs); + queue.get_transactions_for_block(&state_view, max_txs_in_block, &mut txs); + let expired_tx_event = event_receiver.recv().await.unwrap(); assert!(txs.is_empty()); - assert_eq!(expired_txs.len(), 1); - assert_eq!(expired_txs[0], tx); + + assert_eq!( + expired_tx_event, + TransactionEvent { + hash: tx_hash, + block_height: None, + status: TransactionStatus::Expired, + } + .into() + ) } #[test] @@ -763,7 +821,7 @@ pub mod tests { query_handle, )); - let queue = Arc::new(Queue::from_config(Config { + let queue = Arc::new(Queue::test(Config { transaction_time_to_live: Duration::from_secs(100), capacity: 100_000_000.try_into().unwrap(), ..Config::default() @@ -837,7 +895,7 @@ pub mod tests { )); let state_view = state.view(); - let queue = Queue::from_config(Config { + let queue = Queue::test(Config { future_threshold, ..Config::default() }); @@ -898,7 +956,7 @@ pub mod tests { let query_handle = LiveQueryStore::test().start(); let state = State::new(world, kura, query_handle); - let queue = Queue::from_config(Config { + let queue = Queue::test(Config { transaction_time_to_live: Duration::from_secs(100), capacity: 100.try_into().unwrap(), capacity_per_user: 1.try_into().unwrap(), diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index 1b8f8715ad8..daf7faae917 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -316,20 +316,24 @@ mod tests { let first_block = BlockBuilder::new(transactions.clone(), topology.clone(), Vec::new()) .chain(0, &mut state_block) .sign(&ALICE_KEYS) + .unpack(|_| {}) .commit(&topology) + .unpack(|_| {}) .expect("Block is valid"); - state_block.apply(&first_block)?; + let _events = state_block.apply(&first_block)?; kura.store_block(first_block); for _ in 1u64..blocks { let block = BlockBuilder::new(transactions.clone(), topology.clone(), Vec::new()) .chain(0, &mut state_block) .sign(&ALICE_KEYS) + .unpack(|_| {}) .commit(&topology) + .unpack(|_| {}) .expect("Block is valid"); - state_block.apply(&block)?; + let _events = state_block.apply(&block)?; kura.store_block(block); } state_block.commit(); @@ -466,10 +470,12 @@ mod tests { let vcb = BlockBuilder::new(vec![va_tx.clone()], topology.clone(), Vec::new()) .chain(0, &mut state_block) .sign(&ALICE_KEYS) + .unpack(|_| {}) .commit(&topology) + .unpack(|_| {}) .expect("Block 
is valid"); - state_block.apply(&vcb)?; + let _events = state_block.apply(&vcb)?; kura.store_block(vcb); state_block.commit(); diff --git a/core/src/smartcontracts/isi/triggers/set.rs b/core/src/smartcontracts/isi/triggers/set.rs index d7bfca0b769..63d7732e92b 100644 --- a/core/src/smartcontracts/isi/triggers/set.rs +++ b/core/src/smartcontracts/isi/triggers/set.rs @@ -58,8 +58,8 @@ type WasmSmartContractMap = IndexMap, (WasmSmartContra pub struct Set { /// Triggers using [`DataEventFilter`] data_triggers: IndexMap>, - /// Triggers using [`PipelineEventFilter`] - pipeline_triggers: IndexMap>, + /// Triggers using [`PipelineEventFilterBox`] + pipeline_triggers: IndexMap>, /// Triggers using [`TimeEventFilter`] time_triggers: IndexMap>, /// Triggers using [`ExecuteTriggerEventFilter`] @@ -70,7 +70,7 @@ pub struct Set { original_contracts: WasmSmartContractMap, /// List of actions that should be triggered by events provided by `handle_*` methods. /// Vector is used to save the exact triggers order. - matched_ids: Vec<(Event, TriggerId)>, + matched_ids: Vec<(EventBox, TriggerId)>, } /// Helper struct for serializing triggers. @@ -177,7 +177,7 @@ impl<'de> DeserializeSeed<'de> for WasmSeed<'_, Set> { "pipeline_triggers" => { let triggers: IndexMap< TriggerId, - SpecializedAction, + SpecializedAction, > = map.next_value()?; for (id, action) in triggers { set.add_pipeline_trigger( @@ -259,7 +259,7 @@ impl Set { }) } - /// Add trigger with [`PipelineEventFilter`] + /// Add trigger with [`PipelineEventFilterBox`] /// /// Return `false` if a trigger with given id already exists /// @@ -270,7 +270,7 @@ impl Set { pub fn add_pipeline_trigger( &mut self, engine: &wasmtime::Engine, - trigger: SpecializedTrigger, + trigger: SpecializedTrigger, ) -> Result { self.add_to(engine, trigger, TriggeringEventType::Pipeline, |me| { &mut me.pipeline_triggers @@ -721,18 +721,6 @@ impl Set { }; } - /// Handle [`PipelineEvent`]. - /// - /// Find all actions that are triggered by `event` and store them. - /// These actions are inspected in the next [`Set::inspect_matched()`] call. - // Passing by value to follow other `handle_` methods interface - #[allow(clippy::needless_pass_by_value)] - pub fn handle_pipeline_event(&mut self, event: PipelineEvent) { - self.pipeline_triggers.iter().for_each(|entry| { - Self::match_and_insert_trigger(&mut self.matched_ids, event.clone(), entry) - }); - } - /// Handle [`TimeEvent`]. /// /// Find all actions that are triggered by `event` and store them. 
@@ -747,7 +735,7 @@ impl Set { continue; } - let ids = core::iter::repeat_with(|| (Event::Time(event), id.clone())).take( + let ids = core::iter::repeat_with(|| (EventBox::Time(event), id.clone())).take( count .try_into() .expect("`u32` should always fit in `usize`"), @@ -761,8 +749,8 @@ impl Set { /// Skips insertion: /// - If the action's filter doesn't match an event /// - If the action's repeats count equals to 0 - fn match_and_insert_trigger, F: EventFilter>( - matched_ids: &mut Vec<(Event, TriggerId)>, + fn match_and_insert_trigger, F: EventFilter>( + matched_ids: &mut Vec<(EventBox, TriggerId)>, event: E, (id, action): (&TriggerId, &LoadedAction), ) { @@ -825,7 +813,7 @@ impl Set { } /// Extract `matched_id` - pub fn extract_matched_ids(&mut self) -> Vec<(Event, TriggerId)> { + pub fn extract_matched_ids(&mut self) -> Vec<(EventBox, TriggerId)> { core::mem::take(&mut self.matched_ids) } } diff --git a/core/src/smartcontracts/isi/triggers/specialized.rs b/core/src/smartcontracts/isi/triggers/specialized.rs index 09e898b126d..24aa7b34500 100644 --- a/core/src/smartcontracts/isi/triggers/specialized.rs +++ b/core/src/smartcontracts/isi/triggers/specialized.rs @@ -103,7 +103,7 @@ macro_rules! impl_try_from_box { impl_try_from_box! { Data => DataEventFilter, - Pipeline => PipelineEventFilter, + Pipeline => PipelineEventFilterBox, Time => TimeEventFilter, ExecuteTrigger => ExecuteTriggerEventFilter, } @@ -228,7 +228,7 @@ mod tests { .unwrap() } TriggeringEventFilterBox::Pipeline(_) => { - SpecializedTrigger::::try_from(boxed) + SpecializedTrigger::::try_from(boxed) .map(|_| ()) .unwrap() } diff --git a/core/src/smartcontracts/wasm.rs b/core/src/smartcontracts/wasm.rs index dd8df4bd163..25f27e25675 100644 --- a/core/src/smartcontracts/wasm.rs +++ b/core/src/smartcontracts/wasm.rs @@ -465,7 +465,7 @@ pub mod state { #[derive(Constructor)] pub struct Trigger { /// Event which activated this trigger - pub(in super::super) triggering_event: Event, + pub(in super::super) triggering_event: EventBox, } pub mod executor { @@ -977,7 +977,7 @@ impl<'wrld, 'block: 'wrld, 'state: 'block> Runtime Result<()> { let span = wasm_log_span!("Trigger execution", %id, %authority); let state = state::Trigger::new( diff --git a/core/src/state.rs b/core/src/state.rs index b9291530cbf..20255cdfe51 100644 --- a/core/src/state.rs +++ b/core/src/state.rs @@ -7,7 +7,12 @@ use iroha_crypto::HashOf; use iroha_data_model::{ account::AccountId, block::SignedBlock, - events::trigger_completed::{TriggerCompletedEvent, TriggerCompletedOutcome}, + events::{ + pipeline::BlockEvent, + time::TimeEvent, + trigger_completed::{TriggerCompletedEvent, TriggerCompletedOutcome}, + EventBox, + }, isi::error::{InstructionExecutionError as Error, MathError}, parameter::{Parameter, ParameterValueBox}, permission::{PermissionTokenSchema, Permissions}, @@ -16,7 +21,7 @@ use iroha_data_model::{ role::RoleId, }; use iroha_logger::prelude::*; -use iroha_primitives::{numeric::Numeric, small::SmallVec}; +use iroha_primitives::{must_use::MustUse, numeric::Numeric, small::SmallVec}; use parking_lot::Mutex; use range_bounds::RoleIdByAccountBounds; use serde::{ @@ -95,7 +100,7 @@ pub struct WorldBlock<'world> { /// Runtime Executor pub(crate) executor: CellBlock<'world, Executor>, /// Events produced during execution of block - pub(crate) events_buffer: Vec, + events_buffer: Vec, } /// Struct for single transaction's aggregated changes @@ -126,7 +131,7 @@ pub struct WorldTransaction<'block, 'world> { /// Wrapper for event's buffer to apply 
transaction rollback struct TransactionEventBuffer<'block> { /// Events produced during execution of block - events_buffer: &'block mut Vec, + events_buffer: &'block mut Vec, /// Number of events produced during execution current transaction events_created_in_transaction: usize, } @@ -285,7 +290,7 @@ impl World { } } - /// Create struct to apply block's changes while reverting changes made in the latest block + /// Create struct to apply block's changes while reverting changes made in the latest block pub fn block_and_revert(&self) -> WorldBlock { WorldBlock { parameters: self.parameters.block_and_revert(), @@ -895,14 +900,14 @@ impl WorldTransaction<'_, '_> { } impl TransactionEventBuffer<'_> { - fn push(&mut self, event: Event) { + fn push(&mut self, event: EventBox) { self.events_created_in_transaction += 1; self.events_buffer.push(event); } } -impl Extend for TransactionEventBuffer<'_> { - fn extend>(&mut self, iter: T) { +impl Extend for TransactionEventBuffer<'_> { + fn extend>(&mut self, iter: T) { let len_before = self.events_buffer.len(); self.events_buffer.extend(iter); let len_after = self.events_buffer.len(); @@ -1024,7 +1029,7 @@ pub trait StateReadOnly { } /// Return the hash of the block one before the latest block - fn previous_block_hash(&self) -> Option> { + fn prev_block_hash(&self) -> Option> { self.block_hashes().iter().nth_back(1).copied() } @@ -1183,13 +1188,10 @@ impl<'state> StateBlock<'state> { deprecated(note = "This function is to be used in testing only. ") )] #[iroha_logger::log(skip_all, fields(block_height))] - pub fn apply(&mut self, block: &CommittedBlock) -> Result<()> { + pub fn apply(&mut self, block: &CommittedBlock) -> Result>> { self.execute_transactions(block)?; debug!("All block transactions successfully executed"); - - self.apply_without_execution(block)?; - - Ok(()) + Ok(self.apply_without_execution(block).into()) } /// Execute `block` transactions and store their hashes as well as @@ -1217,12 +1219,13 @@ impl<'state> StateBlock<'state> { /// Apply transactions without actually executing them. /// It's assumed that block's transaction was already executed (as part of validation for example). 
#[iroha_logger::log(skip_all, fields(block_height = block.as_ref().header().height))] - pub fn apply_without_execution(&mut self, block: &CommittedBlock) -> Result<()> { + #[must_use] + pub fn apply_without_execution(&mut self, block: &CommittedBlock) -> Vec { let block_hash = block.as_ref().hash(); trace!(%block_hash, "Applying block"); let time_event = self.create_time_event(block); - self.world.events_buffer.push(Event::Time(time_event)); + self.world.events_buffer.push(time_event.into()); let block_height = block.as_ref().header().height; block @@ -1248,24 +1251,44 @@ impl<'state> StateBlock<'state> { self.block_hashes.push(block_hash); self.apply_parameters(); - - Ok(()) + self.world.events_buffer.push( + BlockEvent { + header: block.as_ref().header().clone(), + hash: block.as_ref().hash(), + status: BlockStatus::Applied, + } + .into(), + ); + core::mem::take(&mut self.world.events_buffer) } /// Create time event using previous and current blocks fn create_time_event(&self, block: &CommittedBlock) -> TimeEvent { + use iroha_config::parameters::defaults::chain_wide::{ + DEFAULT_BLOCK_TIME, DEFAULT_COMMIT_TIME, + }; + + const DEFAULT_CONSENSUS_ESTIMATION: Duration = + match DEFAULT_BLOCK_TIME.checked_add(match DEFAULT_COMMIT_TIME.checked_div(2) { + Some(x) => x, + None => unreachable!(), + }) { + Some(x) => x, + None => unreachable!(), + }; + let prev_interval = self.latest_block_ref().map(|latest_block| { let header = &latest_block.as_ref().header(); TimeInterval { since: header.timestamp(), - length: header.consensus_estimation(), + length: DEFAULT_CONSENSUS_ESTIMATION, } }); let interval = TimeInterval { since: block.as_ref().header().timestamp(), - length: block.as_ref().header().consensus_estimation(), + length: DEFAULT_CONSENSUS_ESTIMATION, }; TimeEvent { @@ -1388,7 +1411,7 @@ impl StateTransaction<'_, '_> { &mut self, id: &TriggerId, action: &dyn LoadedActionTrait, - event: Event, + event: EventBox, ) -> Result<()> { use triggers::set::LoadedExecutable::*; let authority = action.authority(); @@ -1751,7 +1774,7 @@ mod tests { /// Used to inject faulty payload for testing fn payload_mut(block: &mut CommittedBlock) -> &mut BlockPayload { - let SignedBlock::V1(signed) = &mut block.0 .0; + let SignedBlock::V1(signed) = block.as_mut(); &mut signed.payload } @@ -1760,7 +1783,10 @@ mod tests { const BLOCK_CNT: usize = 10; let topology = Topology::new(UniqueVec::new()); - let block = ValidBlock::new_dummy().commit(&topology).unwrap(); + let block = ValidBlock::new_dummy() + .commit(&topology) + .unpack(|_| {}) + .unwrap(); let kura = Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); let state = State::new(World::default(), kura, query_handle); @@ -1774,7 +1800,7 @@ mod tests { payload_mut(&mut block).header.previous_block_hash = block_hashes.last().copied(); block_hashes.push(block.as_ref().hash()); - state_block.apply(&block).unwrap(); + let _events = state_block.apply(&block).unwrap(); } assert!(state_block @@ -1788,7 +1814,10 @@ mod tests { const BLOCK_CNT: usize = 10; let topology = Topology::new(UniqueVec::new()); - let block = ValidBlock::new_dummy().commit(&topology).unwrap(); + let block = ValidBlock::new_dummy() + .commit(&topology) + .unpack(|_| {}) + .unwrap(); let kura = Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); let state = State::new(World::default(), kura.clone(), query_handle); @@ -1798,7 +1827,7 @@ mod tests { let mut block = block.clone(); payload_mut(&mut block).header.height = i as u64; - 
state_block.apply(&block).unwrap(); + let _events = state_block.apply(&block).unwrap(); kura.store_block(block); } @@ -1806,7 +1835,7 @@ mod tests { &state_block .all_blocks() .skip(7) - .map(|block| *block.header().height()) + .map(|block| block.header().height()) .collect::>(), &[8, 9, 10] ); diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index 13bb94bb01a..df7e925a220 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -2,10 +2,7 @@ use std::sync::mpsc; use iroha_crypto::HashOf; -use iroha_data_model::{ - block::*, events::pipeline::PipelineEvent, peer::PeerId, - transaction::error::TransactionRejectionReason, -}; +use iroha_data_model::{block::*, events::pipeline::PipelineEventBox, peer::PeerId}; use iroha_p2p::UpdateTopology; use tracing::{span, Level}; @@ -82,17 +79,19 @@ impl Sumeragi { #[allow(clippy::needless_pass_by_value, single_use_lifetimes)] // TODO: uncomment when anonymous lifetimes are stable fn broadcast_packet_to<'peer_id>( &self, - msg: BlockMessage, + msg: impl Into, ids: impl IntoIterator + Send, ) { + let msg = msg.into(); + for peer_id in ids { self.post_packet_to(msg.clone(), peer_id); } } - fn broadcast_packet(&self, msg: BlockMessage) { + fn broadcast_packet(&self, msg: impl Into) { let broadcast = iroha_p2p::Broadcast { - data: NetworkMessage::SumeragiBlock(Box::new(msg)), + data: NetworkMessage::SumeragiBlock(Box::new(msg.into())), }; self.network.broadcast(broadcast); } @@ -116,17 +115,8 @@ impl Sumeragi { self.block_time + self.commit_time } - fn send_events(&self, events: impl IntoIterator>) { - let addr = &self.peer_id.address; - - if self.events_sender.receiver_count() > 0 { - for event in events { - self.events_sender - .send(event.into()) - .map_err(|err| warn!(%addr, ?err, "Event not sent")) - .unwrap_or(0); - } - } + fn send_event(&self, event: impl Into) { + let _ = self.events_sender.send(event.into()); } fn receive_network_packet( @@ -239,13 +229,15 @@ impl Sumeragi { &self.chain_id, &mut state_block, ) + .unpack(|e| self.send_event(e)) .and_then(|block| { block .commit(&self.current_topology) + .unpack(|e| self.send_event(e)) .map_err(|(block, error)| (block.into(), error)) }) { Ok(block) => block, - Err((_, error)) => { + Err(error) => { error!(?error, "Received invalid genesis block"); continue; } @@ -280,12 +272,14 @@ impl Sumeragi { let mut state_block = state.block(); let genesis = BlockBuilder::new(transactions, self.current_topology.clone(), vec![]) .chain(0, &mut state_block) - .sign(&self.key_pair); + .sign(&self.key_pair) + .unpack(|e| self.send_event(e)); - let genesis_msg = BlockCreated::from(genesis.clone()).into(); + let genesis_msg = BlockCreated::from(genesis.clone()); let genesis = genesis .commit(&self.current_topology) + .unpack(|e| self.send_event(e)) .expect("Genesis invalid"); assert!( @@ -319,24 +313,18 @@ impl Sumeragi { info!( addr=%self.peer_id.address, role=%self.current_topology.role(&self.peer_id), - block_height=%state_block.height(), + block_height=%block.as_ref().header().height, block_hash=%block.as_ref().hash(), "{}", Strategy::LOG_MESSAGE, ); - state_block - .apply_without_execution(&block) - .expect("Failed to apply block on state. 
Bailing."); - - let state_events = core::mem::take(&mut state_block.world.events_buffer); - self.send_events(state_events); + let state_events = state_block.apply_without_execution(&block); let new_topology = Topology::recreate_topology( block.as_ref(), 0, state_block.world.peers().cloned().collect(), ); - let events = block.produce_events(); // https://github.com/hyperledger/iroha/issues/3396 // Kura should store the block only upon successful application to the internal state to avoid storing a corrupted block. @@ -346,6 +334,7 @@ impl Sumeragi { // Parameters are updated before updating public copy of sumeragi self.update_params(&state_block); self.cache_transaction(&state_block); + self.current_topology = new_topology; self.connect_peers(&self.current_topology); @@ -353,7 +342,7 @@ impl Sumeragi { state_block.commit(); // NOTE: This sends "Block committed" event, // so it should be done AFTER public facing state update - self.send_events(events); + state_events.into_iter().for_each(|e| self.send_event(e)); } fn update_params(&mut self, state_block: &StateBlock<'_>) { @@ -385,22 +374,23 @@ impl Sumeragi { topology: &Topology, BlockCreated { block }: BlockCreated, ) -> Option> { - let block_hash = block.hash_of_payload(); + let block_hash = block.hash(); let addr = &self.peer_id.address; let role = self.current_topology.role(&self.peer_id); - trace!(%addr, %role, block_hash=%block_hash, "Block received, voting..."); + trace!(%addr, %role, block=%block_hash, "Block received, voting..."); let mut state_block = state.block(); - let block = match ValidBlock::validate(block, topology, &self.chain_id, &mut state_block) { + let block = match ValidBlock::validate(block, topology, &self.chain_id, &mut state_block) + .unpack(|e| self.send_event(e)) + { Ok(block) => block, - Err((_, error)) => { + Err(error) => { warn!(%addr, %role, ?error, "Block validation failed"); return None; } }; let signed_block = block.sign(&self.key_pair); - Some(VotingBlock::new(signed_block, state_block)) } @@ -434,30 +424,30 @@ impl Sumeragi { match (message, role) { (BlockMessage::BlockSyncUpdate(BlockSyncUpdate { block }), _) => { let block_hash = block.hash(); - info!(%addr, %role, hash=%block_hash, "Block sync update received"); + info!(%addr, %role, block=%block_hash, "Block sync update received"); // Release writer before handling block sync let _ = voting_block.take(); - match handle_block_sync(&self.chain_id, block, state) { + match handle_block_sync(&self.chain_id, block, state, &|e| self.send_event(e)) { Ok(BlockSyncOk::CommitBlock(block, state_block)) => { - self.commit_block(block, state_block) + self.commit_block(block, state_block); } Ok(BlockSyncOk::ReplaceTopBlock(block, state_block)) => { warn!( %addr, %role, peer_latest_block_hash=?state_block.latest_block_hash(), peer_latest_block_view_change_index=?state_block.latest_block_view_change_index(), - consensus_latest_block_hash=%block.as_ref().hash(), + consensus_latest_block=%block.as_ref().hash(), consensus_latest_block_view_change_index=%block.as_ref().header().view_change_index, "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." 
); self.replace_top_block(block, state_block) } Err((_, BlockSyncError::BlockNotValid(error))) => { - error!(%addr, %role, %block_hash, ?error, "Block not valid.") + error!(%addr, %role, block=%block_hash, ?error, "Block not valid.") } Err((_, BlockSyncError::SoftForkBlockNotValid(error))) => { - error!(%addr, %role, %block_hash, ?error, "Soft-fork block not valid.") + error!(%addr, %role, block=%block_hash, ?error, "Soft-fork block not valid.") } Err(( _, @@ -470,7 +460,7 @@ impl Sumeragi { %addr, %role, peer_latest_block_hash=?state.view().latest_block_hash(), peer_latest_block_view_change_index=?peer_view_change_index, - consensus_latest_block_hash=%block_hash, + consensus_latest_block=%block_hash, consensus_latest_block_view_change_index=%block_view_change_index, "Soft fork doesn't occurred: block has the same or smaller view change index" ); @@ -496,28 +486,30 @@ impl Sumeragi { { error!(%addr, %role, "Received BlockCommitted message, but shouldn't"); } else if let Some(voted_block) = voting_block.take() { - let voting_block_hash = voted_block.block.as_ref().hash_of_payload(); - - if hash == voting_block_hash { - match voted_block - .block - .commit_with_signatures(current_topology, signatures) - { - Ok(committed_block) => { - self.commit_block(committed_block, voted_block.state_block) - } - Err((_, error)) => { - error!(%addr, %role, %hash, ?error, "Block failed to be committed") - } - }; - } else { - error!( - %addr, %role, committed_block_hash=%hash, %voting_block_hash, - "The hash of the committed block does not match the hash of the block stored by the peer." - ); - - *voting_block = Some(voted_block); - }; + match voted_block + .block + .commit_with_signatures(current_topology, signatures, hash) + .unpack(|e| self.send_event(e)) + { + Ok(committed_block) => { + self.commit_block(committed_block, voted_block.state_block) + } + Err(( + valid_block, + BlockValidationError::IncorrectHash { expected, actual }, + )) => { + error!(%addr, %role, %expected, %actual, "The hash of the committed block does not match the hash of the block stored by the peer."); + + *voting_block = Some(VotingBlock { + voted_at: voted_block.voted_at, + block: valid_block, + state_block: voted_block.state_block, + }); + } + Err((_, error)) => { + error!(%addr, %role, %hash, ?error, "Block failed to be committed") + } + } } else { error!(%addr, %role, %hash, "Peer missing voting block") } @@ -531,20 +523,19 @@ impl Sumeragi { let _ = voting_block.take(); if let Some(v_block) = self.vote_for_block(state, ¤t_topology, block_created) { - let block_hash = v_block.block.as_ref().hash_of_payload(); - - let msg = BlockSigned::from(v_block.block.clone()).into(); + let block_hash = v_block.block.as_ref().hash(); + let msg = BlockSigned::from(&v_block.block); self.broadcast_packet_to(msg, [current_topology.proxy_tail()]); - info!(%addr, %block_hash, "Block validated, signed and forwarded"); + info!(%addr, block=%block_hash, "Block validated, signed and forwarded"); *voting_block = Some(v_block); } } (BlockMessage::BlockCreated(block_created), Role::ObservingPeer) => { let current_topology = current_topology.is_consensus_required().expect( - "Peer has `ObservingPeer` role, which mean that current topology require consensus", - ); + "Peer has `ObservingPeer` role, which mean that current topology require consensus" + ); // Release block writer before creating new one let _ = voting_block.take(); @@ -554,10 +545,10 @@ impl Sumeragi { let block_hash = v_block.block.as_ref().hash(); self.broadcast_packet_to( - 
BlockSigned::from(v_block.block.clone()).into(), + BlockSigned::from(&v_block.block), [current_topology.proxy_tail()], ); - info!(%addr, %block_hash, "Block validated, signed and forwarded"); + info!(%addr, block=%block_hash, "Block validated, signed and forwarded"); *voting_block = Some(v_block); } else { error!(%addr, %role, "Received BlockCreated message, but shouldn't"); @@ -641,33 +632,35 @@ impl Sumeragi { event_recommendations, ) .chain(current_view_change_index, &mut state_block) - .sign(&self.key_pair); + .sign(&self.key_pair) + .unpack(|e| self.send_event(e)); let created_in = create_block_start_time.elapsed(); if let Some(current_topology) = current_topology.is_consensus_required() { - info!(%addr, created_in_ms=%created_in.as_millis(), block_payload_hash=%new_block.as_ref().hash_of_payload(), "Block created"); + info!(%addr, created_in_ms=%created_in.as_millis(), block=%new_block.as_ref().hash(), "Block created"); if created_in > self.pipeline_time() / 2 { warn!("Creating block takes too much time. This might prevent consensus from operating. Consider increasing `commit_time` or decreasing `max_transactions_in_block`"); } *voting_block = Some(VotingBlock::new(new_block.clone(), state_block)); - let msg = BlockCreated::from(new_block).into(); + let msg = BlockCreated::from(new_block); if current_view_change_index >= 1 { self.broadcast_packet(msg); } else { self.broadcast_packet_to(msg, current_topology.voting_peers()); } } else { - match new_block.commit(current_topology) { + match new_block + .commit(current_topology) + .unpack(|e| self.send_event(e)) + { Ok(committed_block) => { - self.broadcast_packet( - BlockCommitted::from(committed_block.clone()).into(), - ); + self.broadcast_packet(BlockCommitted::from(&committed_block)); self.commit_block(committed_block, state_block); } - Err((_, error)) => error!(%addr, role=%Role::Leader, ?error), - } + Err(error) => error!(%addr, role=%Role::Leader, ?error), + }; } } } @@ -677,12 +670,15 @@ impl Sumeragi { let voted_at = voted_block.voted_at; let state_block = voted_block.state_block; - match voted_block.block.commit(current_topology) { + match voted_block + .block + .commit(current_topology) + .unpack(|e| self.send_event(e)) + { Ok(committed_block) => { - info!(voting_block_hash = %committed_block.as_ref().hash(), "Block reached required number of votes"); - - let msg = BlockCommitted::from(committed_block.clone()).into(); + info!(block=%committed_block.as_ref().hash(), "Block reached required number of votes"); + let msg = BlockCommitted::from(&committed_block); let current_topology = current_topology .is_consensus_required() .expect("Peer has `ProxyTail` role, which mean that current topology require consensus"); @@ -863,14 +859,11 @@ pub(crate) fn run( expired }); - let mut expired_transactions = Vec::new(); sumeragi.queue.get_transactions_for_block( &state_view, sumeragi.max_txs_in_block, &mut sumeragi.transaction_cache, - &mut expired_transactions, ); - sumeragi.send_events(expired_transactions.iter().map(expired_event)); let current_view_change_index = sumeragi .prune_view_change_proofs_and_calculate_current_index( @@ -928,7 +921,7 @@ pub(crate) fn run( if node_expects_block { if let Some(VotingBlock { block, .. 
}) = voting_block.as_ref() { // NOTE: Suspecting the tail node because it hasn't yet committed a block produced by leader - warn!(peer_public_key=%sumeragi.peer_id.public_key, %role, block=%block.as_ref().hash_of_payload(), "Block not committed in due time, requesting view change..."); + warn!(peer_public_key=%sumeragi.peer_id.public_key, %role, block=%block.as_ref().hash(), "Block not committed in due time, requesting view change..."); } else { // NOTE: Suspecting the leader node because it hasn't produced a block // If the current node has a transaction, the leader should have as well @@ -1001,18 +994,6 @@ fn add_signatures( } } -/// Create expired pipeline event for the given transaction. -fn expired_event(txn: &AcceptedTransaction) -> Event { - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Rejected(PipelineRejectionReason::Transaction( - TransactionRejectionReason::Expired, - )), - hash: txn.as_ref().hash().into(), - } - .into() -} - /// Type enumerating early return types to reduce cyclomatic /// complexity of the main loop items and allow direct short /// circuiting with the `?` operator. Candidate for `impl @@ -1092,10 +1073,11 @@ enum BlockSyncError { }, } -fn handle_block_sync<'state>( +fn handle_block_sync<'state, F: Fn(PipelineEventBox)>( chain_id: &ChainId, block: SignedBlock, state: &'state State, + handle_events: &F, ) -> Result, (SignedBlock, BlockSyncError)> { let block_height = block.header().height; let state_height = state.view().height(); @@ -1111,9 +1093,11 @@ fn handle_block_sync<'state>( Topology::recreate_topology(&last_committed_block, view_change_index, new_peers) }; ValidBlock::validate(block, &topology, chain_id, &mut state_block) + .unpack(handle_events) .and_then(|block| { block .commit(&topology) + .unpack(handle_events) .map_err(|(block, err)| (block.into(), err)) }) .map(|block| BlockSyncOk::CommitBlock(block, state_block)) @@ -1144,9 +1128,11 @@ fn handle_block_sync<'state>( Topology::recreate_topology(&last_committed_block, view_change_index, new_peers) }; ValidBlock::validate(block, &topology, chain_id, &mut state_block) + .unpack(handle_events) .and_then(|block| { block .commit(&topology) + .unpack(handle_events) .map_err(|(block, err)| (block.into(), err)) }) .map_err(|(block, error)| (block, BlockSyncError::SoftForkBlockNotValid(error))) @@ -1214,10 +1200,14 @@ mod tests { // Creating a block of two identical transactions and validating it let block = BlockBuilder::new(vec![tx.clone(), tx], topology.clone(), Vec::new()) .chain(0, &mut state_block) - .sign(leader_key_pair); - - let genesis = block.commit(topology).expect("Block is valid"); - state_block.apply(&genesis).expect("Failed to apply block"); + .sign(leader_key_pair) + .unpack(|_| {}); + + let genesis = block + .commit(topology) + .unpack(|_| {}) + .expect("Block is valid"); + let _events = state_block.apply(&genesis).expect("Failed to apply block"); state_block.commit(); kura.store_block(genesis); @@ -1256,6 +1246,7 @@ mod tests { BlockBuilder::new(vec![tx1, tx2], topology.clone(), Vec::new()) .chain(0, &mut state_block) .sign(leader_key_pair) + .unpack(|_| {}) }; (state, kura, block.into()) @@ -1276,7 +1267,7 @@ mod tests { // Malform block to make it invalid payload_mut(&mut block).commit_topology.clear(); - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!(result, Err((_, BlockSyncError::BlockNotValid(_))))) } @@ -1292,12 +1283,14 @@ mod tests { let 
(state, kura, mut block) = create_data_for_test(&chain_id, &topology, &leader_key_pair); let mut state_block = state.block(); - let validated_block = - ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block).unwrap(); - let committed_block = validated_block.commit(&topology).expect("Block is valid"); - state_block - .apply_without_execution(&committed_block) - .expect("Failed to apply block"); + let committed_block = + ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block) + .unpack(|_| {}) + .unwrap() + .commit(&topology) + .unpack(|_| {}) + .expect("Block is valid"); + let _events = state_block.apply_without_execution(&committed_block); state_block.commit(); kura.store_block(committed_block); @@ -1305,7 +1298,7 @@ mod tests { payload_mut(&mut block).commit_topology.clear(); payload_mut(&mut block).header.view_change_index = 1; - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!( result, Err((_, BlockSyncError::SoftForkBlockNotValid(_))) @@ -1324,7 +1317,7 @@ mod tests { // Change block height payload_mut(&mut block).header.height = 42; - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!( result, Err(( @@ -1348,7 +1341,7 @@ mod tests { leader_key_pair.public_key().clone(), )]); let (state, _, block) = create_data_for_test(&chain_id, &topology, &leader_key_pair); - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!(result, Ok(BlockSyncOk::CommitBlock(_, _)))) } @@ -1364,12 +1357,14 @@ mod tests { let (state, kura, mut block) = create_data_for_test(&chain_id, &topology, &leader_key_pair); let mut state_block = state.block(); - let validated_block = - ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block).unwrap(); - let committed_block = validated_block.commit(&topology).expect("Block is valid"); - state_block - .apply_without_execution(&committed_block) - .expect("Failed to apply block"); + let committed_block = + ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block) + .unpack(|_| {}) + .unwrap() + .commit(&topology) + .unpack(|_| {}) + .expect("Block is valid"); + let _events = state_block.apply_without_execution(&committed_block); state_block.commit(); kura.store_block(committed_block); @@ -1378,7 +1373,7 @@ mod tests { // Increase block view change index payload_mut(&mut block).header.view_change_index = 42; - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!(result, Ok(BlockSyncOk::ReplaceTopBlock(_, _)))) } @@ -1397,12 +1392,14 @@ mod tests { payload_mut(&mut block).header.view_change_index = 42; let mut state_block = state.block(); - let validated_block = - ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block).unwrap(); - let committed_block = validated_block.commit(&topology).expect("Block is valid"); - state_block - .apply_without_execution(&committed_block) - .expect("Failed to apply block"); + let committed_block = + ValidBlock::validate(block.clone(), &topology, &chain_id, &mut state_block) + .unpack(|_| {}) + .unwrap() + .commit(&topology) + .unpack(|_| {}) + .expect("Block is valid"); + let _events = state_block.apply_without_execution(&committed_block); state_block.commit(); 
kura.store_block(committed_block); assert_eq!(state.view().latest_block_view_change_index(), 42); @@ -1410,7 +1407,7 @@ mod tests { // Decrease block view change index back payload_mut(&mut block).header.view_change_index = 0; - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!( result, Err(( @@ -1437,7 +1434,7 @@ mod tests { payload_mut(&mut block).header.view_change_index = 42; payload_mut(&mut block).header.height = 1; - let result = handle_block_sync(&chain_id, block, &state); + let result = handle_block_sync(&chain_id, block, &state, &|_| {}); assert!(matches!( result, Err(( diff --git a/core/src/sumeragi/message.rs b/core/src/sumeragi/message.rs index b0a80207072..c5d4fa27fa7 100644 --- a/core/src/sumeragi/message.rs +++ b/core/src/sumeragi/message.rs @@ -62,14 +62,14 @@ pub struct BlockSigned { pub signatures: SignaturesOf, } -impl From for BlockSigned { - fn from(block: ValidBlock) -> Self { +impl From<&ValidBlock> for BlockSigned { + fn from(block: &ValidBlock) -> Self { let block_hash = block.as_ref().hash_of_payload(); - let SignedBlock::V1(block) = block.into(); + let block_signatures = block.as_ref().signatures().clone(); Self { hash: block_hash, - signatures: block.signatures, + signatures: block_signatures, } } } @@ -79,14 +79,14 @@ impl From for BlockSigned { #[non_exhaustive] pub struct BlockCommitted { /// Hash of the block being signed. - pub hash: HashOf, + pub hash: HashOf, /// Set of signatures. pub signatures: SignaturesOf, } -impl From for BlockCommitted { - fn from(block: CommittedBlock) -> Self { - let block_hash = block.as_ref().hash_of_payload(); +impl From<&CommittedBlock> for BlockCommitted { + fn from(block: &CommittedBlock) -> Self { + let block_hash = block.as_ref().hash(); let block_signatures = block.as_ref().signatures().clone(); Self { diff --git a/core/src/sumeragi/mod.rs b/core/src/sumeragi/mod.rs index 1e10895b992..f59a7ee6259 100644 --- a/core/src/sumeragi/mod.rs +++ b/core/src/sumeragi/mod.rs @@ -4,7 +4,7 @@ use std::{ fmt::{self, Debug, Formatter}, sync::{mpsc, Arc}, - time::{Duration, Instant}, + time::{Duration, Instant, SystemTime}, }; use eyre::{Result, WrapErr as _}; @@ -129,9 +129,13 @@ impl SumeragiHandle { #[allow(clippy::cast_possible_truncation)] if let Some(timestamp) = state_view.genesis_timestamp() { + let curr_time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time"); + // this will overflow in 584942417years. 
self.metrics.uptime_since_genesis_ms.set( - (current_time() - timestamp) + (curr_time - timestamp) .as_millis() .try_into() .expect("Timestamp should fit into u64"), @@ -193,24 +197,33 @@ impl SumeragiHandle { chain_id: &ChainId, block: &SignedBlock, state_block: &mut StateBlock<'_>, + events_sender: &EventsSender, mut current_topology: Topology, ) -> Topology { // NOTE: topology need to be updated up to block's view_change_index current_topology.rotate_all_n(block.header().view_change_index); let block = ValidBlock::validate(block.clone(), ¤t_topology, chain_id, state_block) - .expect("Kura blocks should be valid") + .unpack(|e| { + let _ = events_sender.send(e.into()); + }) + .expect("Kura: Invalid block") .commit(¤t_topology) - .expect("Kura blocks should be valid"); + .unpack(|e| { + let _ = events_sender.send(e.into()); + }) + .expect("Kura: Invalid block"); if block.as_ref().header().is_genesis() { *state_block.world.trusted_peers_ids = block.as_ref().commit_topology().clone(); } - state_block.apply_without_execution(&block).expect( - "Block application in init should not fail. \ - Blocks loaded from kura assumed to be valid", - ); + state_block + .apply_without_execution(&block) + .into_iter() + .for_each(|e| { + let _ = events_sender.send(e); + }); Topology::recreate_topology( block.as_ref(), @@ -278,6 +291,7 @@ impl SumeragiHandle { &common_config.chain_id, &block, &mut state_block, + &events_sender, current_topology, ); state_block.commit(); @@ -356,16 +370,21 @@ pub const PEERS_CONNECT_INTERVAL: Duration = Duration::from_secs(1); pub const TELEMETRY_INTERVAL: Duration = Duration::from_secs(5); /// Structure represents a block that is currently in discussion. -#[non_exhaustive] pub struct VotingBlock<'state> { + /// Valid Block + block: ValidBlock, /// At what time has this peer voted for this block pub voted_at: Instant, - /// Valid Block - pub block: ValidBlock, /// [`WorldState`] after applying transactions to it but before it was committed pub state_block: StateBlock<'state>, } +impl AsRef for VotingBlock<'_> { + fn as_ref(&self) -> &ValidBlock { + &self.block + } +} + impl VotingBlock<'_> { /// Construct new `VotingBlock` with current time. pub fn new(block: ValidBlock, state_block: StateBlock<'_>) -> VotingBlock { @@ -382,8 +401,8 @@ impl VotingBlock<'_> { voted_at: Instant, ) -> VotingBlock { VotingBlock { - voted_at, block, + voted_at, state_block, } } diff --git a/core/src/tx.rs b/core/src/tx.rs index 26e0858a4e5..1ae6b4b0b2b 100644 --- a/core/src/tx.rs +++ b/core/src/tx.rs @@ -14,7 +14,7 @@ pub use iroha_data_model::prelude::*; use iroha_data_model::{ isi::error::Mismatch, query::error::FindError, - transaction::{error::TransactionLimitError, TransactionLimits}, + transaction::{error::TransactionLimitError, TransactionLimits, TransactionPayload}, }; use iroha_genesis::GenesisTransaction; use iroha_logger::{debug, error}; diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index 012f475eda0..df2843afa06 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -14,7 +14,7 @@ use iroha_client::{ }; use iroha_config::parameters::actual::Root as Config; pub use iroha_core::state::StateReadOnly; -use iroha_crypto::prelude::*; +use iroha_crypto::KeyPair; use iroha_data_model::{query::QueryOutputBox, ChainId}; use iroha_genesis::{GenesisNetwork, RawGenesisBlockFile}; use iroha_logger::InstrumentFutures; @@ -54,11 +54,11 @@ pub fn get_chain_id() -> ChainId { /// Get a standardised key-pair from the hard-coded literals. 
pub fn get_key_pair() -> KeyPair { KeyPair::new( - PublicKey::from_str( + iroha_crypto::PublicKey::from_str( "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0", ).unwrap(), - PrivateKey::from_hex( - Algorithm::Ed25519, + iroha_crypto::PrivateKey::from_hex( + iroha_crypto::Algorithm::Ed25519, "9AC47ABF59B356E0BD7DCBBBB4DEC080E302156A48CA907E47CB6AEA1D32719E7233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" ).unwrap() ).unwrap() @@ -689,7 +689,7 @@ pub trait TestClient: Sized { fn test_with_account(api_url: &SocketAddr, keys: KeyPair, account_id: &AccountId) -> Self; /// Loop for events with filter and handler function - fn for_each_event(self, event_filter: impl Into, f: impl Fn(Result)); + fn for_each_event(self, event_filter: impl Into, f: impl Fn(Result)); /// Submit instruction with polling /// @@ -828,9 +828,9 @@ impl TestClient for Client { Client::new(config) } - fn for_each_event(self, event_filter: impl Into, f: impl Fn(Result)) { + fn for_each_event(self, event_filter: impl Into, f: impl Fn(Result)) { for event_result in self - .listen_for_events(event_filter) + .listen_for_events([event_filter]) .expect("Failed to create event iterator.") { f(event_result) diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index f1662780479..aafd7868459 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -27,8 +27,6 @@ use alloc::{ }; use core::{borrow::Borrow, fmt, str::FromStr}; -#[cfg(feature = "base64")] -pub use base64; #[cfg(not(feature = "ffi_import"))] pub use blake2; use derive_more::Display; @@ -857,11 +855,6 @@ mod ffi { pub(crate) use ffi_item; } -/// The prelude re-exports most commonly used items from this crate. -pub mod prelude { - pub use super::{Algorithm, Hash, KeyPair, PrivateKey, PublicKey, Signature}; -} - #[cfg(test)] mod tests { use parity_scale_codec::{Decode, Encode}; diff --git a/crypto/src/signature/mod.rs b/crypto/src/signature/mod.rs index b22eac891a7..66c75ef8e71 100644 --- a/crypto/src/signature/mod.rs +++ b/crypto/src/signature/mod.rs @@ -55,7 +55,7 @@ ffi::ffi_item! { public_key: PublicKey, /// Signature payload #[serde_as(as = "serde_with::hex::Hex")] - payload: ConstVec, + payload: ConstVec, } } diff --git a/data_model/derive/src/enum_ref.rs b/data_model/derive/src/enum_ref.rs index 8215be75795..eefb58fab78 100644 --- a/data_model/derive/src/enum_ref.rs +++ b/data_model/derive/src/enum_ref.rs @@ -151,7 +151,7 @@ impl ToTokens for EnumRef { quote! 
{ #attrs - pub(crate) enum #ident<'a> #impl_generics #where_clause { + pub(super) enum #ident<'a> #impl_generics #where_clause { #(#variants),* } } diff --git a/data_model/derive/src/lib.rs b/data_model/derive/src/lib.rs index 384ca813542..32b11ecee39 100644 --- a/data_model/derive/src/lib.rs +++ b/data_model/derive/src/lib.rs @@ -15,35 +15,39 @@ use proc_macro2::TokenStream; /// # Example /// /// ``` -/// use iroha_data_model_derive::EnumRef; -/// use parity_scale_codec::Encode; -/// -/// #[derive(EnumRef)] -/// #[enum_ref(derive(Encode))] -/// pub enum InnerEnum { -/// A(u32), -/// B(i32) -/// } +/// mod model { +/// use iroha_data_model_derive::EnumRef; +/// use parity_scale_codec::Encode; +/// +/// #[derive(EnumRef)] +/// #[enum_ref(derive(Encode))] +/// pub enum InnerEnum { +/// A(u32), +/// B(i32) +/// } /// -/// #[derive(EnumRef)] -/// #[enum_ref(derive(Encode))] -/// pub enum OuterEnum { -/// A(String), -/// #[enum_ref(transparent)] -/// B(InnerEnum), +/// #[derive(EnumRef)] +/// #[enum_ref(derive(Encode))] +/// pub enum OuterEnum { +/// A(String), +/// #[enum_ref(transparent)] +/// B(InnerEnum), +/// } /// } /// /// /* will produce: -/// #[derive(Encode)] -/// pub(crate) enum InnerEnumRef<'a> { -/// A(&'a u32), -/// B(&'a i32), -/// } +/// mod model { +/// #[derive(Encode)] +/// pub(super) enum InnerEnumRef<'a> { +/// A(&'a u32), +/// B(&'a i32), +/// } /// -/// #[derive(Encode)] -/// pub(crate) enum OuterEnumRef<'a> { -/// A(&'a String), -/// B(InnerEnumRef<'a>), +/// #[derive(Encode)] +/// pub(super) enum OuterEnumRef<'a> { +/// A(&'a String), +/// B(InnerEnumRef<'a>), +/// } /// } /// */ /// ``` diff --git a/data_model/derive/src/model.rs b/data_model/derive/src/model.rs index a5fdb7a7510..0547fc99ab7 100644 --- a/data_model/derive/src/model.rs +++ b/data_model/derive/src/model.rs @@ -7,7 +7,6 @@ use syn::{parse_quote, Attribute}; pub fn impl_model(emitter: &mut Emitter, input: &syn::ItemMod) -> TokenStream { let syn::ItemMod { attrs, - vis, mod_token, ident, content, @@ -15,14 +14,6 @@ pub fn impl_model(emitter: &mut Emitter, input: &syn::ItemMod) -> TokenStream { .. } = input; - let syn::Visibility::Public(vis_public) = vis else { - emit!( - emitter, - input, - "The `model` attribute can only be used on public modules" - ); - return quote!(); - }; if ident != "model" { emit!( emitter, @@ -38,7 +29,7 @@ pub fn impl_model(emitter: &mut Emitter, input: &syn::ItemMod) -> TokenStream { quote! { #(#attrs)* #[allow(missing_docs)] - #vis_public #mod_token #ident { + #mod_token #ident { #(#items_code)* }#semi } diff --git a/data_model/src/account.rs b/data_model/src/account.rs index 2383bdc21ac..739b9d0d6c7 100644 --- a/data_model/src/account.rs +++ b/data_model/src/account.rs @@ -14,8 +14,6 @@ use derive_more::{Constructor, DebugCustom, Display}; use getset::Getters; use iroha_data_model_derive::{model, IdEqOrdHash}; use iroha_primitives::const_vec::ConstVec; -#[cfg(feature = "transparent_api")] -use iroha_primitives::must_use::MustUse; use iroha_schema::IntoSchema; use parity_scale_codec::{Decode, Encode}; use serde::{Deserialize, Serialize}; @@ -276,10 +274,11 @@ impl Account { /// Checks whether the transaction contains all the signatures required by the /// [`SignatureCheckCondition`] stored in this account. 
+ #[must_use] pub fn check_signature_check_condition( &self, transaction_signatories: &btree_set::BTreeSet, - ) -> MustUse { + ) -> bool { self.signature_check_condition .check(&self.signatories, transaction_signatories) } @@ -399,12 +398,13 @@ impl SignatureCheckCondition { Self::AllAccountSignaturesAnd(ConstVec::new_empty()) } + #[must_use] #[cfg(feature = "transparent_api")] fn check( &self, account_signatories: &btree_set::BTreeSet, transaction_signatories: &btree_set::BTreeSet, - ) -> MustUse { + ) -> bool { let result = match &self { SignatureCheckCondition::AnyAccountSignatureOr(additional_allowed_signatures) => { account_signatories @@ -420,7 +420,7 @@ impl SignatureCheckCondition { } }; - MustUse::new(result) + result } } @@ -431,6 +431,8 @@ pub mod prelude { #[cfg(test)] mod tests { + #[cfg(not(feature = "std"))] + use alloc::{vec, vec::Vec}; use core::cmp::Ordering; use iroha_crypto::{KeyPair, PublicKey}; @@ -452,7 +454,7 @@ mod tests { let tx_signatories = tx_signatories.iter().copied().cloned().collect(); assert_eq!( - condition.check(&account_signatories, &tx_signatories,).0, + condition.check(&account_signatories, &tx_signatories,), result ); } diff --git a/data_model/src/block.rs b/data_model/src/block.rs index 93ce5bec045..e8d6c24a270 100644 --- a/data_model/src/block.rs +++ b/data_model/src/block.rs @@ -9,7 +9,6 @@ use alloc::{boxed::Box, format, string::String, vec::Vec}; use core::{fmt::Display, time::Duration}; use derive_more::Display; -use getset::Getters; #[cfg(all(feature = "std", feature = "transparent_api"))] use iroha_crypto::KeyPair; use iroha_crypto::{HashOf, MerkleTree, SignaturesOf}; @@ -26,6 +25,8 @@ use crate::{events::prelude::*, peer, transaction::prelude::*}; #[model] pub mod model { + use getset::{CopyGetters, Getters}; + use super::*; #[derive( @@ -37,6 +38,7 @@ pub mod model { PartialOrd, Ord, Getters, + CopyGetters, Decode, Encode, Deserialize, @@ -48,22 +50,24 @@ pub mod model { display(fmt = "Block №{height} (hash: {});", "HashOf::new(&self)") )] #[cfg_attr(not(feature = "std"), display(fmt = "Block №{height}"))] - #[getset(get = "pub")] #[allow(missing_docs)] #[ffi_type] pub struct BlockHeader { /// Number of blocks in the chain including this block. + #[getset(get_copy = "pub")] pub height: u64, - /// Creation timestamp (unix time in milliseconds). - #[getset(skip)] - pub timestamp_ms: u64, /// Hash of the previous block in the chain. + #[getset(get = "pub")] pub previous_block_hash: Option>, /// Hash of merkle tree root of transactions' hashes. + #[getset(get = "pub")] pub transactions_hash: Option>>, + /// Creation timestamp (unix time in milliseconds). + #[getset(skip)] + pub timestamp_ms: u64, /// Value of view change index. Used to resolve soft forks. - pub view_change_index: u64, #[getset(skip)] + pub view_change_index: u64, /// Estimation of consensus duration (in milliseconds). pub consensus_estimation_ms: u64, } @@ -76,7 +80,6 @@ pub mod model { Eq, PartialOrd, Ord, - Getters, Decode, Encode, Deserialize, @@ -84,45 +87,28 @@ pub mod model { IntoSchema, )] #[display(fmt = "({header})")] - #[getset(get = "pub")] #[allow(missing_docs)] - #[ffi_type] - pub struct BlockPayload { + pub(crate) struct BlockPayload { /// Block header pub header: BlockHeader, /// Topology of the network at the time of block commit. - #[getset(skip)] // FIXME: Because ffi related issues pub commit_topology: UniqueVec, /// array of transactions, which successfully passed validation and consensus step. 
- #[getset(skip)] // FIXME: Because ffi related issues pub transactions: Vec, /// Event recommendations. - #[getset(skip)] // NOTE: Unused ATM - pub event_recommendations: Vec, + pub event_recommendations: Vec, } /// Signed block #[version_with_scale(version = 1, versioned_alias = "SignedBlock")] #[derive( - Debug, - Display, - Clone, - PartialEq, - Eq, - PartialOrd, - Ord, - Getters, - Encode, - Serialize, - IntoSchema, + Debug, Display, Clone, PartialEq, Eq, PartialOrd, Ord, Encode, Serialize, IntoSchema, )] #[cfg_attr(not(feature = "std"), display(fmt = "Signed block"))] #[cfg_attr(feature = "std", display(fmt = "{}", "self.hash()"))] - #[getset(get = "pub")] #[ffi_type] pub struct SignedBlockV1 { /// Signatures of peers which approved this block. - #[getset(skip)] pub signatures: SignaturesOf, /// Block payload pub payload: BlockPayload, @@ -134,13 +120,6 @@ declare_versioned!(SignedBlock 1..2, Debug, Clone, PartialEq, Eq, PartialOrd, Or #[cfg(all(not(feature = "ffi_export"), not(feature = "ffi_import")))] declare_versioned!(SignedBlock 1..2, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, FromVariant, IntoSchema); -impl BlockPayload { - /// Calculate block payload [`Hash`](`iroha_crypto::HashOf`). - pub fn hash(&self) -> iroha_crypto::HashOf { - iroha_crypto::HashOf::new(self) - } -} - impl BlockHeader { /// Checks if it's a header of a genesis block. #[inline] @@ -153,11 +132,6 @@ impl BlockHeader { pub fn timestamp(&self) -> Duration { Duration::from_millis(self.timestamp_ms) } - - /// Consensus estimation - pub fn consensus_estimation(&self) -> Duration { - Duration::from_millis(self.consensus_estimation_ms) - } } impl SignedBlockV1 { @@ -168,21 +142,21 @@ impl SignedBlockV1 { } impl SignedBlock { - /// Block transactions + /// Block header #[inline] - pub fn transactions(&self) -> impl ExactSizeIterator { + pub fn header(&self) -> &BlockHeader { let SignedBlock::V1(block) = self; - block.payload.transactions.iter() + &block.payload.header } - /// Block header + /// Block transactions #[inline] - pub fn header(&self) -> &BlockHeader { + pub fn transactions(&self) -> impl ExactSizeIterator { let SignedBlock::V1(block) = self; - block.payload.header() + block.payload.transactions.iter() } - /// Block commit topology + /// Topology of the network at the time of block commit. 
#[inline] #[cfg(feature = "transparent_api")] pub fn commit_topology(&self) -> &UniqueVec { @@ -213,8 +187,8 @@ impl SignedBlock { } /// Add additional signatures to this block - #[cfg(feature = "transparent_api")] #[must_use] + #[cfg(feature = "transparent_api")] pub fn sign(mut self, key_pair: &KeyPair) -> Self { let SignedBlock::V1(block) = &mut self; let signature = iroha_crypto::SignatureOf::new(key_pair, &block.payload); @@ -292,7 +266,7 @@ mod candidate { } fn validate_header(&self) -> Result<(), &'static str> { - let actual_txs_hash = self.payload.header().transactions_hash; + let actual_txs_hash = self.payload.header.transactions_hash; let expected_txs_hash = self .payload diff --git a/data_model/src/events/data/filters.rs b/data_model/src/events/data/filters.rs index 4edc08c828e..92743725aaf 100644 --- a/data_model/src/events/data/filters.rs +++ b/data_model/src/events/data/filters.rs @@ -705,7 +705,6 @@ impl EventFilter for DataEventFilter { (DataEvent::Peer(event), Peer(filter)) => filter.matches(event), (DataEvent::Trigger(event), Trigger(filter)) => filter.matches(event), (DataEvent::Role(event), Role(filter)) => filter.matches(event), - (DataEvent::PermissionToken(_), PermissionTokenSchemaUpdate) => true, (DataEvent::Configuration(event), Configuration(filter)) => filter.matches(event), (DataEvent::Executor(event), Executor(filter)) => filter.matches(event), diff --git a/data_model/src/events/mod.rs b/data_model/src/events/mod.rs index 94c003526bc..d9e59fd6a8c 100644 --- a/data_model/src/events/mod.rs +++ b/data_model/src/events/mod.rs @@ -7,9 +7,11 @@ use iroha_data_model_derive::model; use iroha_macro::FromVariant; use iroha_schema::IntoSchema; use parity_scale_codec::{Decode, Encode}; +use pipeline::{BlockEvent, TransactionEvent}; use serde::{Deserialize, Serialize}; pub use self::model::*; +use self::pipeline::{BlockEventFilter, TransactionEventFilter}; pub mod data; pub mod execute_trigger; @@ -37,9 +39,9 @@ pub mod model { IntoSchema, )] #[ffi_type] - pub enum Event { + pub enum EventBox { /// Pipeline event. - Pipeline(pipeline::PipelineEvent), + Pipeline(pipeline::PipelineEventBox), /// Data event. Data(data::DataEvent), /// Time event. @@ -85,7 +87,7 @@ pub mod model { #[ffi_type(opaque)] pub enum EventFilterBox { /// Listen to pipeline events with filter. - Pipeline(pipeline::PipelineEventFilter), + Pipeline(pipeline::PipelineEventFilterBox), /// Listen to data events with filter. Data(data::DataEventFilter), /// Listen to time events with filter. @@ -116,7 +118,7 @@ pub mod model { #[ffi_type(opaque)] pub enum TriggeringEventFilterBox { /// Listen to pipeline events with filter. - Pipeline(pipeline::PipelineEventFilter), + Pipeline(pipeline::PipelineEventFilterBox), /// Listen to data events with filter. Data(data::DataEventFilter), /// Listen to time events with filter. 
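
The hunk that follows adds the `From`/`TryFrom` glue between the concrete pipeline types and the new boxed enums, so both directions are one conversion away. Roughly, under the assumption that the field syntax matches the queue changes above:

```rust
use iroha_data_model::events::{pipeline::TransactionEvent, EventBox};

fn box_and_unbox(event: TransactionEvent) {
    // Concrete event -> boxed event...
    let boxed: EventBox = event.into();
    // ...and back; fails if the boxed event holds a different variant.
    let unboxed: Result<TransactionEvent, _> = boxed.try_into();
    assert!(unboxed.is_ok());
}
```
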
@@ -126,6 +128,62 @@ pub mod model { } } +impl From for EventBox { + fn from(source: TransactionEvent) -> Self { + Self::Pipeline(source.into()) + } +} + +impl From for EventBox { + fn from(source: BlockEvent) -> Self { + Self::Pipeline(source.into()) + } +} + +impl From for EventFilterBox { + fn from(source: TransactionEventFilter) -> Self { + Self::Pipeline(source.into()) + } +} + +impl From for EventFilterBox { + fn from(source: BlockEventFilter) -> Self { + Self::Pipeline(source.into()) + } +} + +impl TryFrom for TransactionEvent { + type Error = iroha_macro::error::ErrorTryFromEnum; + + fn try_from(event: EventBox) -> Result { + use iroha_macro::error::ErrorTryFromEnum; + + let EventBox::Pipeline(pipeline_event) = event else { + return Err(ErrorTryFromEnum::default()); + }; + + pipeline_event + .try_into() + .map_err(|_| ErrorTryFromEnum::default()) + } +} + +impl TryFrom for BlockEvent { + type Error = iroha_macro::error::ErrorTryFromEnum; + + fn try_from(event: EventBox) -> Result { + use iroha_macro::error::ErrorTryFromEnum; + + let EventBox::Pipeline(pipeline_event) = event else { + return Err(ErrorTryFromEnum::default()); + }; + + pipeline_event + .try_into() + .map_err(|_| ErrorTryFromEnum::default()) + } +} + /// Trait for filters #[cfg(feature = "transparent_api")] pub trait EventFilter { @@ -156,25 +214,27 @@ pub trait EventFilter { #[cfg(feature = "transparent_api")] impl EventFilter for EventFilterBox { - type Event = Event; + type Event = EventBox; /// Apply filter to event. - fn matches(&self, event: &Event) -> bool { + fn matches(&self, event: &EventBox) -> bool { match (event, self) { - (Event::Pipeline(event), Self::Pipeline(filter)) => filter.matches(event), - (Event::Data(event), Self::Data(filter)) => filter.matches(event), - (Event::Time(event), Self::Time(filter)) => filter.matches(event), - (Event::ExecuteTrigger(event), Self::ExecuteTrigger(filter)) => filter.matches(event), - (Event::TriggerCompleted(event), Self::TriggerCompleted(filter)) => { + (EventBox::Pipeline(event), Self::Pipeline(filter)) => filter.matches(event), + (EventBox::Data(event), Self::Data(filter)) => filter.matches(event), + (EventBox::Time(event), Self::Time(filter)) => filter.matches(event), + (EventBox::ExecuteTrigger(event), Self::ExecuteTrigger(filter)) => { + filter.matches(event) + } + (EventBox::TriggerCompleted(event), Self::TriggerCompleted(filter)) => { filter.matches(event) } // Fail to compile in case when new variant to event or filter is added ( - Event::Pipeline(_) - | Event::Data(_) - | Event::Time(_) - | Event::ExecuteTrigger(_) - | Event::TriggerCompleted(_), + EventBox::Pipeline(_) + | EventBox::Data(_) + | EventBox::Time(_) + | EventBox::ExecuteTrigger(_) + | EventBox::TriggerCompleted(_), Self::Pipeline(_) | Self::Data(_) | Self::Time(_) @@ -187,22 +247,24 @@ impl EventFilter for EventFilterBox { #[cfg(feature = "transparent_api")] impl EventFilter for TriggeringEventFilterBox { - type Event = Event; + type Event = EventBox; /// Apply filter to event. 
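
The `From`/`TryFrom` impls above let callers hop between the concrete pipeline types and the boxed enums without matching by hand. Roughly (a sketch; `demo` is a hypothetical function):

```rust
use iroha_data_model::{
    events::pipeline::{TransactionEvent, TransactionEventFilter},
    prelude::{EventBox, EventFilterBox},
};

fn demo(event: EventBox) {
    // Concrete filter -> boxed filter, via the new `From` impl.
    let _boxed: EventFilterBox = TransactionEventFilter::default().into();

    // Boxed event -> concrete event, via the new `TryFrom` impl;
    // this errors out for data, time, and block events.
    if let Ok(tx) = TransactionEvent::try_from(event) {
        println!("transaction {:?}: {:?}", tx.hash(), tx.status());
    }
}
```
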
- fn matches(&self, event: &Event) -> bool { + fn matches(&self, event: &EventBox) -> bool { match (event, self) { - (Event::Pipeline(event), Self::Pipeline(filter)) => filter.matches(event), - (Event::Data(event), Self::Data(filter)) => filter.matches(event), - (Event::Time(event), Self::Time(filter)) => filter.matches(event), - (Event::ExecuteTrigger(event), Self::ExecuteTrigger(filter)) => filter.matches(event), + (EventBox::Pipeline(event), Self::Pipeline(filter)) => filter.matches(event), + (EventBox::Data(event), Self::Data(filter)) => filter.matches(event), + (EventBox::Time(event), Self::Time(filter)) => filter.matches(event), + (EventBox::ExecuteTrigger(event), Self::ExecuteTrigger(filter)) => { + filter.matches(event) + } // Fail to compile in case when new variant to event or filter is added ( - Event::Pipeline(_) - | Event::Data(_) - | Event::Time(_) - | Event::ExecuteTrigger(_) - | Event::TriggerCompleted(_), + EventBox::Pipeline(_) + | EventBox::Data(_) + | EventBox::Time(_) + | EventBox::ExecuteTrigger(_) + | EventBox::TriggerCompleted(_), Self::Pipeline(_) | Self::Data(_) | Self::Time(_) | Self::ExecuteTrigger(_), ) => false, } @@ -279,16 +341,16 @@ pub mod stream { /// Event sent by the peer. #[derive(Debug, Clone, Decode, Encode, IntoSchema)] #[repr(transparent)] - pub struct EventMessage(pub Event); + pub struct EventMessage(pub EventBox); /// Message sent by the stream consumer. /// Request sent by the client to subscribe to events. #[derive(Debug, Clone, Constructor, Decode, Encode, IntoSchema)] #[repr(transparent)] - pub struct EventSubscriptionRequest(pub EventFilterBox); + pub struct EventSubscriptionRequest(pub Vec); } - impl From for Event { + impl From for EventBox { fn from(source: EventMessage) -> Self { source.0 } @@ -303,7 +365,7 @@ pub mod prelude { pub use super::EventFilter; pub use super::{ data::prelude::*, execute_trigger::prelude::*, pipeline::prelude::*, time::prelude::*, - trigger_completed::prelude::*, Event, EventFilterBox, TriggeringEventFilterBox, + trigger_completed::prelude::*, EventBox, EventFilterBox, TriggeringEventFilterBox, TriggeringEventType, }; } diff --git a/data_model/src/events/pipeline.rs b/data_model/src/events/pipeline.rs index 5d3b962144f..216f99ca05d 100644 --- a/data_model/src/events/pipeline.rs +++ b/data_model/src/events/pipeline.rs @@ -1,59 +1,55 @@ //! Pipeline events. #[cfg(not(feature = "std"))] -use alloc::{format, string::String, vec::Vec}; +use alloc::{boxed::Box, format, string::String, vec::Vec}; -use getset::Getters; -use iroha_crypto::Hash; +use iroha_crypto::HashOf; use iroha_data_model_derive::model; use iroha_macro::FromVariant; use iroha_schema::IntoSchema; use parity_scale_codec::{Decode, Encode}; use serde::{Deserialize, Serialize}; -use strum::EnumDiscriminants; pub use self::model::*; +use crate::{ + block::{BlockHeader, SignedBlock}, + transaction::SignedTransaction, +}; #[model] pub mod model { + use getset::Getters; + use super::*; - /// [`Event`] filter. #[derive( Debug, Clone, - Copy, PartialEq, Eq, PartialOrd, Ord, - Default, - Getters, + FromVariant, Decode, Encode, - Serialize, Deserialize, + Serialize, IntoSchema, )] - pub struct PipelineEventFilter { - /// If `Some::`, filter by the [`EntityKind`]. If `None`, accept all the [`EntityKind`]. - pub(super) entity_kind: Option, - /// If `Some::`, filter by the [`StatusKind`]. If `None`, accept all the [`StatusKind`]. - pub(super) status_kind: Option, - /// If `Some::`, filter by the [`struct@Hash`]. If `None`, accept all the [`struct@Hash`]. 
- // TODO: Can we make hash typed like HashOf? - pub(super) hash: Option, + #[ffi_type(opaque)] + pub enum PipelineEventBox { + Transaction(TransactionEvent), + Block(BlockEvent), } - /// The kind of the pipeline entity. #[derive( Debug, Clone, - Copy, PartialEq, Eq, PartialOrd, Ord, + Getters, Decode, Encode, Deserialize, @@ -61,15 +57,13 @@ pub mod model { IntoSchema, )] #[ffi_type] - #[repr(u8)] - pub enum PipelineEntityKind { - /// Block - Block, - /// Transaction - Transaction, + #[getset(get = "pub")] + pub struct BlockEvent { + pub header: BlockHeader, + pub hash: HashOf, + pub status: BlockStatus, } - /// Strongly-typed [`Event`] that tells the receiver the kind and the hash of the changed entity as well as its [`Status`]. #[derive( Debug, Clone, @@ -84,18 +78,15 @@ pub mod model { Serialize, IntoSchema, )] - #[getset(get = "pub")] #[ffi_type] - pub struct PipelineEvent { - /// [`EntityKind`] of the entity that caused this [`Event`]. - pub entity_kind: PipelineEntityKind, - /// [`Status`] of the entity that caused this [`Event`]. - pub status: PipelineStatus, - /// [`struct@Hash`] of the entity that caused this [`Event`]. - pub hash: Hash, + #[getset(get = "pub")] + pub struct TransactionEvent { + pub hash: HashOf, + pub block_height: Option, + pub status: TransactionStatus, } - /// [`Status`] of the entity. + /// Report of block's status in the pipeline #[derive( Debug, Clone, @@ -103,129 +94,221 @@ pub mod model { Eq, PartialOrd, Ord, - FromVariant, - EnumDiscriminants, Decode, Encode, + Deserialize, Serialize, + IntoSchema, + )] + #[ffi_type(opaque)] + pub enum BlockStatus { + /// Block was approved to participate in consensus + Approved, + /// Block was rejected by consensus + Rejected(crate::block::error::BlockRejectionReason), + /// Block has passed consensus successfully + Committed, + /// Changes have been reflected in the WSV + Applied, + } + + /// Report of transaction's status in the pipeline + #[derive( + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Decode, + Encode, Deserialize, + Serialize, IntoSchema, )] - #[strum_discriminants( - name(PipelineStatusKind), - derive(PartialOrd, Ord, Decode, Encode, Deserialize, Serialize, IntoSchema,) + #[ffi_type(opaque)] + pub enum TransactionStatus { + /// Transaction was received and enqueued + Queued, + /// Transaction was dropped(not stored in a block) + Expired, + /// Transaction was stored in the block as valid + Approved, + /// Transaction was stored in the block as invalid + Rejected(Box), + } + + #[derive( + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + FromVariant, + Decode, + Encode, + Deserialize, + Serialize, + IntoSchema, )] #[ffi_type] - pub enum PipelineStatus { - /// Entity has been seen in the blockchain but has not passed validation. - Validating, - /// Entity was rejected during validation. - Rejected(PipelineRejectionReason), - /// Entity has passed validation. - Committed, + pub enum PipelineEventFilterBox { + Transaction(TransactionEventFilter), + Block(BlockEventFilter), } - /// The reason for rejecting pipeline entity such as transaction or block. 
     #[derive(
         Debug,
-        displaydoc::Display,
         Clone,
         PartialEq,
         Eq,
         PartialOrd,
         Ord,
-        FromVariant,
+        Default,
+        Getters,
         Decode,
         Encode,
         Deserialize,
         Serialize,
         IntoSchema,
     )]
-    #[cfg_attr(feature = "std", derive(thiserror::Error))]
     #[ffi_type]
-    pub enum PipelineRejectionReason {
-        /// Block was rejected
-        Block(#[cfg_attr(feature = "std", source)] crate::block::error::BlockRejectionReason),
-        /// Transaction was rejected
-        Transaction(
-            #[cfg_attr(feature = "std", source)]
-            crate::transaction::error::TransactionRejectionReason,
-        ),
+    #[getset(get = "pub")]
+    pub struct BlockEventFilter {
+        pub height: Option<u64>,
+        pub status: Option<BlockStatus>,
+    }
+
+    #[derive(
+        Debug,
+        Clone,
+        PartialEq,
+        Eq,
+        PartialOrd,
+        Ord,
+        Default,
+        Getters,
+        Decode,
+        Encode,
+        Deserialize,
+        Serialize,
+        IntoSchema,
+    )]
+    #[ffi_type]
+    #[getset(get = "pub")]
+    pub struct TransactionEventFilter {
+        pub hash: Option<HashOf<SignedTransaction>>,
+        #[getset(skip)]
+        pub block_height: Option<Option<u64>>,
+        pub status: Option<TransactionStatus>,
+    }
 }
 
-impl PipelineEventFilter {
-    /// Creates a new [`PipelineEventFilter`] accepting all [`PipelineEvent`]s
+impl BlockEventFilter {
+    /// Match only block with the given height
     #[must_use]
-    #[inline]
-    pub const fn new() -> Self {
-        Self {
-            status_kind: None,
-            entity_kind: None,
-            hash: None,
-        }
+    pub fn for_height(mut self, height: u64) -> Self {
+        self.height = Some(height);
+        self
     }
 
-    /// Modifies a [`PipelineEventFilter`] to accept only [`PipelineEvent`]s originating from a specific entity kind (block/transaction).
+    /// Match only block with the given status
     #[must_use]
-    #[inline]
-    pub const fn for_entity(mut self, entity_kind: PipelineEntityKind) -> Self {
-        self.entity_kind = Some(entity_kind);
+    pub fn for_status(mut self, status: BlockStatus) -> Self {
+        self.status = Some(status);
         self
     }
+}
+
+impl TransactionEventFilter {
+    /// Get height of the block filter is set to track
+    pub fn block_height(&self) -> Option<Option<u64>> {
+        self.block_height
+    }
 
-    /// Modifies a [`PipelineEventFilter`] to accept only [`PipelineEvent`]s with a specific status.
+    /// Match only transactions with the given block height
     #[must_use]
-    #[inline]
-    pub const fn for_status(mut self, status_kind: PipelineStatusKind) -> Self {
-        self.status_kind = Some(status_kind);
+    pub fn for_block_height(mut self, block_height: Option<u64>) -> Self {
+        self.block_height = Some(block_height);
         self
     }
 
-    /// Modifies a [`PipelineEventFilter`] to accept only [`PipelineEvent`]s originating from an entity with specified hash.
+    /// Match only transactions with the given hash
     #[must_use]
-    #[inline]
-    pub const fn for_hash(mut self, hash: Hash) -> Self {
+    pub fn for_hash(mut self, hash: HashOf<SignedTransaction>) -> Self {
         self.hash = Some(hash);
         self
     }
 
-    #[inline]
-    #[cfg(feature = "transparent_api")]
+    /// Match only transactions with the given status
+    #[must_use]
+    pub fn for_status(mut self, status: TransactionStatus) -> Self {
+        self.status = Some(status);
+        self
+    }
+}
+
+#[cfg(feature = "transparent_api")]
+impl TransactionEventFilter {
     fn field_matches<T: Eq>(filter: Option<&T>, event: &T) -> bool {
         filter.map_or(true, |field| field == event)
     }
 }
 
 #[cfg(feature = "transparent_api")]
-impl super::EventFilter for PipelineEventFilter {
-    type Event = PipelineEvent;
-
-    /// Check if `self` accepts the `event`.
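
The builder methods above combine per-field constraints, and unset fields match anything. For instance, the filter pair the client now subscribes with while waiting for a transaction looks roughly like this (a sketch under those assumptions):

```rust
use iroha_crypto::HashOf;
use iroha_data_model::{
    events::pipeline::{
        BlockEventFilter, BlockStatus, PipelineEventFilterBox, TransactionEventFilter,
    },
    transaction::SignedTransaction,
};

fn status_filters(hash: HashOf<SignedTransaction>) -> Vec<PipelineEventFilterBox> {
    vec![
        // Events about this particular transaction only...
        TransactionEventFilter::default().for_hash(hash).into(),
        // ...plus a notification whenever any block is applied.
        BlockEventFilter::default()
            .for_status(BlockStatus::Applied)
            .into(),
    ]
}
```
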
- #[inline] - fn matches(&self, event: &PipelineEvent) -> bool { - [ - Self::field_matches(self.entity_kind.as_ref(), &event.entity_kind), - Self::field_matches(self.status_kind.as_ref(), &event.status.kind()), - Self::field_matches(self.hash.as_ref(), &event.hash), - ] - .into_iter() - .all(core::convert::identity) +impl BlockEventFilter { + fn field_matches(filter: Option<&T>, event: &T) -> bool { + filter.map_or(true, |field| field == event) } } #[cfg(feature = "transparent_api")] -impl PipelineStatus { - fn kind(&self) -> PipelineStatusKind { - PipelineStatusKind::from(self) +impl super::EventFilter for PipelineEventFilterBox { + type Event = PipelineEventBox; + + /// Check if `self` accepts the `event`. + #[inline] + fn matches(&self, event: &PipelineEventBox) -> bool { + match (self, event) { + (Self::Block(block_filter), PipelineEventBox::Block(block_event)) => [ + BlockEventFilter::field_matches( + block_filter.height.as_ref(), + &block_event.header.height, + ), + BlockEventFilter::field_matches(block_filter.status.as_ref(), &block_event.status), + ] + .into_iter() + .all(core::convert::identity), + ( + Self::Transaction(transaction_filter), + PipelineEventBox::Transaction(transaction_event), + ) => [ + TransactionEventFilter::field_matches( + transaction_filter.hash.as_ref(), + &transaction_event.hash, + ), + TransactionEventFilter::field_matches( + transaction_filter.block_height.as_ref(), + &transaction_event.block_height, + ), + TransactionEventFilter::field_matches( + transaction_filter.status.as_ref(), + &transaction_event.status, + ), + ] + .into_iter() + .all(core::convert::identity), + _ => false, + } } } /// Exports common structs and enums from this module. pub mod prelude { pub use super::{ - PipelineEntityKind, PipelineEvent, PipelineEventFilter, PipelineRejectionReason, - PipelineStatus, PipelineStatusKind, + BlockEvent, BlockStatus, PipelineEventBox, PipelineEventFilterBox, TransactionEvent, + TransactionStatus, }; } @@ -235,94 +318,123 @@ mod tests { #[cfg(not(feature = "std"))] use alloc::{string::ToString as _, vec, vec::Vec}; - use super::{super::EventFilter, PipelineRejectionReason::*, *}; + use iroha_crypto::Hash; + + use super::{super::EventFilter, *}; use crate::{transaction::error::TransactionRejectionReason::*, ValidationFail}; + impl BlockHeader { + fn dummy(height: u64) -> Self { + Self { + height, + previous_block_hash: None, + transactions_hash: None, + timestamp_ms: 0, + view_change_index: 0, + consensus_estimation_ms: 0, + } + } + } + #[test] fn events_are_correctly_filtered() { let events = vec![ - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Validating, - hash: Hash::prehashed([0_u8; Hash::LENGTH]), - }, - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Rejected(Transaction(Validation( + TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([0_u8; Hash::LENGTH])), + block_height: None, + status: TransactionStatus::Queued, + } + .into(), + TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([0_u8; Hash::LENGTH])), + block_height: Some(3), + status: TransactionStatus::Rejected(Box::new(Validation( ValidationFail::TooComplex, ))), - hash: Hash::prehashed([0_u8; Hash::LENGTH]), - }, - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Committed, - hash: Hash::prehashed([2_u8; Hash::LENGTH]), - }, - PipelineEvent { - entity_kind: PipelineEntityKind::Block, - status: 
PipelineStatus::Committed, - hash: Hash::prehashed([2_u8; Hash::LENGTH]), - }, + } + .into(), + TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([2_u8; Hash::LENGTH])), + block_height: None, + status: TransactionStatus::Approved, + } + .into(), + BlockEvent { + header: BlockHeader::dummy(7), + hash: HashOf::from_untyped_unchecked(Hash::prehashed([7_u8; Hash::LENGTH])), + status: BlockStatus::Committed, + } + .into(), ]; + assert_eq!( + events + .iter() + .filter(|&event| { + let filter: PipelineEventFilterBox = TransactionEventFilter::default() + .for_hash(HashOf::from_untyped_unchecked(Hash::prehashed( + [0_u8; Hash::LENGTH], + ))) + .into(); + + filter.matches(event) + }) + .cloned() + .collect::>(), vec![ - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Validating, - hash: Hash::prehashed([0_u8; Hash::LENGTH]), - }, - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Rejected(Transaction(Validation( + TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([0_u8; Hash::LENGTH])), + block_height: None, + status: TransactionStatus::Queued, + } + .into(), + TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([0_u8; Hash::LENGTH])), + block_height: Some(3), + status: TransactionStatus::Rejected(Box::new(Validation( ValidationFail::TooComplex, ))), - hash: Hash::prehashed([0_u8; Hash::LENGTH]), - }, + } + .into(), ], - events - .iter() - .filter(|&event| PipelineEventFilter::new() - .for_hash(Hash::prehashed([0_u8; Hash::LENGTH])) - .matches(event)) - .cloned() - .collect::>() ); + assert_eq!( - vec![PipelineEvent { - entity_kind: PipelineEntityKind::Block, - status: PipelineStatus::Committed, - hash: Hash::prehashed([2_u8; Hash::LENGTH]), - }], events .iter() - .filter(|&event| PipelineEventFilter::new() - .for_entity(PipelineEntityKind::Block) - .matches(event)) + .filter(|&event| { + let filter: PipelineEventFilterBox = BlockEventFilter::default().into(); + filter.matches(event) + }) .cloned() - .collect::>() + .collect::>(), + vec![BlockEvent { + status: BlockStatus::Committed, + hash: HashOf::from_untyped_unchecked(Hash::prehashed([7_u8; Hash::LENGTH])), + header: BlockHeader::dummy(7), + } + .into()], ); assert_eq!( - vec![PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Committed, - hash: Hash::prehashed([2_u8; Hash::LENGTH]), - }], events .iter() - .filter(|&event| PipelineEventFilter::new() - .for_entity(PipelineEntityKind::Transaction) - .for_hash(Hash::prehashed([2_u8; Hash::LENGTH])) - .matches(event)) + .filter(|&event| { + let filter: PipelineEventFilterBox = TransactionEventFilter::default() + .for_hash(HashOf::from_untyped_unchecked(Hash::prehashed( + [2_u8; Hash::LENGTH], + ))) + .into(); + + filter.matches(event) + }) .cloned() - .collect::>() + .collect::>(), + vec![TransactionEvent { + hash: HashOf::from_untyped_unchecked(Hash::prehashed([2_u8; Hash::LENGTH])), + block_height: None, + status: TransactionStatus::Approved, + } + .into()], ); - assert_eq!( - events, - events - .iter() - .filter(|&event| PipelineEventFilter::new().matches(event)) - .cloned() - .collect::>() - ) } } diff --git a/data_model/src/lib.rs b/data_model/src/lib.rs index 87352bead15..bd004680b5d 100644 --- a/data_model/src/lib.rs +++ b/data_model/src/lib.rs @@ -616,7 +616,6 @@ pub mod parameter { } #[model] -#[allow(irrefutable_let_patterns)] // Triggered from derives macros pub mod model { use super::*; @@ -1022,16 
+1021,6 @@ impl From for RangeInclusive { } } -/// Get the current system time as `Duration` since the unix epoch. -#[cfg(feature = "std")] -pub fn current_time() -> core::time::Duration { - use std::time::SystemTime; - - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("Failed to get the current system time") -} - declare_versioned_with_scale!(BatchedResponse 1..2, Debug, Clone, iroha_macro::FromVariant, IntoSchema); impl From> for (T, crate::query::cursor::ForwardCursor) { @@ -1086,8 +1075,6 @@ pub mod prelude { pub use iroha_crypto::PublicKey; pub use iroha_primitives::numeric::{numeric, Numeric, NumericSpec}; - #[cfg(feature = "std")] - pub use super::current_time; pub use super::{ account::prelude::*, asset::prelude::*, domain::prelude::*, events::prelude::*, executor::prelude::*, isi::prelude::*, metadata::prelude::*, name::prelude::*, diff --git a/data_model/src/query/mod.rs b/data_model/src/query/mod.rs index 7fdb0b9a149..14c7b6caecf 100644 --- a/data_model/src/query/mod.rs +++ b/data_model/src/query/mod.rs @@ -144,7 +144,7 @@ pub mod model { )] #[enum_ref(derive(Encode, FromVariant))] #[strum_discriminants( - vis(pub(crate)), + vis(pub(super)), name(QueryType), derive(Encode), allow(clippy::enum_variant_names) diff --git a/data_model/src/smart_contract.rs b/data_model/src/smart_contract.rs index 379da0585d9..5d51c2f89d3 100644 --- a/data_model/src/smart_contract.rs +++ b/data_model/src/smart_contract.rs @@ -20,7 +20,7 @@ pub mod payloads { /// Trigger owner who registered the trigger pub owner: AccountId, /// Event which triggered the execution - pub event: Event, + pub event: EventBox, } /// Payload for migrate entrypoint diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs index fbfded831ab..f5bd90b9c5c 100644 --- a/data_model/src/transaction.rs +++ b/data_model/src/transaction.rs @@ -9,7 +9,6 @@ use core::{ }; use derive_more::{DebugCustom, Display}; -use getset::Getters; use iroha_crypto::SignaturesOf; use iroha_data_model_derive::model; use iroha_macro::FromVariant; @@ -28,6 +27,8 @@ use crate::{ #[model] pub mod model { + use getset::{CopyGetters, Getters}; + use super::*; /// Either ISI or Wasm binary @@ -89,35 +90,26 @@ pub mod model { Eq, PartialOrd, Ord, - Getters, Decode, Encode, Deserialize, Serialize, IntoSchema, )] - #[getset(get = "pub")] - #[ffi_type] - pub struct TransactionPayload { + pub(crate) struct TransactionPayload { /// Unique id of the blockchain. Used for simple replay attack protection. - #[getset(skip)] // FIXME: ffi error pub chain_id: ChainId, - /// Creation timestamp (unix time in milliseconds). - #[getset(skip)] - pub creation_time_ms: u64, /// Account ID of transaction creator. pub authority: AccountId, + /// Creation timestamp (unix time in milliseconds). + pub creation_time_ms: u64, /// ISI or a `WebAssembly` smart contract. pub instructions: Executable, /// If transaction is not committed by this time it will be dropped. - #[getset(skip)] pub time_to_live_ms: Option, /// Random value to make different hashes for transactions which occur repeatedly and simultaneously. - // TODO: Only temporary - #[getset(skip)] pub nonce: Option, /// Store for additional information. 
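
With `current_time` removed from `iroha_data_model`, call sites inline the computation, as the `TransactionBuilder` hunk below does. Downstream code that imported the helper can keep an equivalent locally; this mirrors the removed body:

```rust
use std::time::{Duration, SystemTime};

/// Local stand-in for the removed `iroha_data_model::current_time()`.
fn current_time() -> Duration {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("Failed to get the current system time")
}
```
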
- #[getset(skip)] // FIXME: ffi error pub metadata: UnlimitedMetadata, } @@ -131,7 +123,7 @@ pub mod model { Eq, PartialOrd, Ord, - Getters, + CopyGetters, Decode, Encode, Deserialize, @@ -139,7 +131,7 @@ pub mod model { IntoSchema, )] #[display(fmt = "{max_instruction_number},{max_wasm_size_bytes}_TL")] - #[getset(get = "pub")] + #[getset(get_copy = "pub")] #[ffi_type] pub struct TransactionLimits { /// Maximum number of instructions per transaction @@ -251,14 +243,14 @@ impl SignedTransaction { #[inline] pub fn instructions(&self) -> &Executable { let SignedTransaction::V1(tx) = self; - tx.payload.instructions() + &tx.payload.instructions } /// Return transaction authority #[inline] pub fn authority(&self) -> &AccountId { let SignedTransaction::V1(tx) = self; - tx.payload.authority() + &tx.payload.authority } /// Return transaction metadata. @@ -449,6 +441,8 @@ pub mod error { #[model] pub mod model { + use getset::Getters; + use super::*; /// Error which indicates max instruction count was reached @@ -565,8 +559,6 @@ pub mod error { InstructionExecution(#[cfg_attr(feature = "std", source)] InstructionExecutionFail), /// Failure in WebAssembly execution WasmExecution(#[cfg_attr(feature = "std", source)] WasmExecutionFail), - /// Transaction rejected due to being expired - Expired, } } @@ -638,7 +630,11 @@ mod http { #[inline] #[cfg(feature = "std")] pub fn new(chain_id: ChainId, authority: AccountId) -> Self { - let creation_time_ms = crate::current_time() + use std::time::SystemTime; + + let creation_time_ms = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time") .as_millis() .try_into() .expect("Unix timestamp exceedes u64::MAX"); @@ -738,13 +734,15 @@ pub mod prelude { #[cfg(feature = "http")] pub use super::http::TransactionBuilder; pub use super::{ - error::prelude::*, Executable, SignedTransaction, TransactionPayload, TransactionValue, - WasmSmartContract, + error::prelude::*, Executable, SignedTransaction, TransactionValue, WasmSmartContract, }; } #[cfg(test)] mod tests { + #[cfg(not(feature = "std"))] + use alloc::vec; + use super::*; #[test] diff --git a/data_model/src/trigger.rs b/data_model/src/trigger.rs index e65cf5f4a96..9a739cfead1 100644 --- a/data_model/src/trigger.rs +++ b/data_model/src/trigger.rs @@ -237,21 +237,21 @@ pub mod action { impl PartialOrd for Action { fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for Action { + fn cmp(&self, other: &Self) -> cmp::Ordering { // Exclude the executable. When debugging and replacing // the trigger, its position in Hash and Tree maps should // not change depending on the content. 
match self.repeats.cmp(&other.repeats) { cmp::Ordering::Equal => {} - ord => return Some(ord), + ord => return ord, } - Some(self.authority.cmp(&other.authority)) - } - } - impl Ord for Action { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.partial_cmp(other) - .expect("`PartialCmp::partial_cmp()` for `Action` should never return `None`") + self.authority.cmp(&other.authority) } } diff --git a/docs/source/references/schema.json b/docs/source/references/schema.json index 496bb8eec28..5e5c102ebce 100644 --- a/docs/source/references/schema.json +++ b/docs/source/references/schema.json @@ -594,14 +594,38 @@ } ] }, - "BlockHeader": { + "BlockEvent": { + "Struct": [ + { + "name": "header", + "type": "BlockHeader" + }, + { + "name": "hash", + "type": "HashOf" + }, + { + "name": "status", + "type": "BlockStatus" + } + ] + }, + "BlockEventFilter": { "Struct": [ { "name": "height", - "type": "u64" + "type": "Option" }, { - "name": "timestamp_ms", + "name": "status", + "type": "Option" + } + ] + }, + "BlockHeader": { + "Struct": [ + { + "name": "height", "type": "u64" }, { @@ -612,6 +636,10 @@ "name": "transactions_hash", "type": "Option>>" }, + { + "name": "timestamp_ms", + "type": "u64" + }, { "name": "view_change_index", "type": "u64" @@ -639,7 +667,7 @@ }, { "name": "event_recommendations", - "type": "Vec" + "type": "Vec" } ] }, @@ -651,6 +679,27 @@ } ] }, + "BlockStatus": { + "Enum": [ + { + "tag": "Approved", + "discriminant": 0 + }, + { + "tag": "Rejected", + "discriminant": 1, + "type": "BlockRejectionReason" + }, + { + "tag": "Committed", + "discriminant": 2 + }, + { + "tag": "Applied", + "discriminant": 3 + } + ] + }, "BlockSubscriptionRequest": "NonZero", "Burn": { "Struct": [ @@ -1023,12 +1072,12 @@ "u32" ] }, - "Event": { + "EventBox": { "Enum": [ { "tag": "Pipeline", "discriminant": 0, - "type": "PipelineEvent" + "type": "PipelineEventBox" }, { "tag": "Data", @@ -1057,7 +1106,7 @@ { "tag": "Pipeline", "discriminant": 0, - "type": "PipelineEventFilter" + "type": "PipelineEventFilterBox" }, { "tag": "Data", @@ -1081,8 +1130,8 @@ } ] }, - "EventMessage": "Event", - "EventSubscriptionRequest": "EventFilterBox", + "EventMessage": "EventBox", + "EventSubscriptionRequest": "Vec", "Executable": { "Enum": [ { @@ -2405,21 +2454,24 @@ "Option": { "Option": "AssetId" }, + "Option": { + "Option": "BlockStatus" + }, "Option": { "Option": "DomainId" }, "Option": { "Option": "Duration" }, - "Option": { - "Option": "Hash" - }, "Option>>": { "Option": "HashOf>" }, "Option>": { "Option": "HashOf" }, + "Option>": { + "Option": "HashOf" + }, "Option": { "Option": "IpfsPath" }, @@ -2429,18 +2481,15 @@ "Option>": { "Option": "NonZero" }, + "Option>": { + "Option": "Option" + }, "Option": { "Option": "ParameterId" }, "Option": { "Option": "PeerId" }, - "Option": { - "Option": "PipelineEntityKind" - }, - "Option": { - "Option": "PipelineStatusKind" - }, "Option": { "Option": "RoleId" }, @@ -2453,6 +2502,9 @@ "Option": { "Option": "TransactionRejectionReason" }, + "Option": { + "Option": "TransactionStatus" + }, "Option": { "Option": "TriggerCompletedOutcomeType" }, @@ -2462,6 +2514,9 @@ "Option": { "Option": "u32" }, + "Option": { + "Option": "u64" + }, "Parameter": { "Struct": [ { @@ -2603,94 +2658,31 @@ } ] }, - "PipelineEntityKind": { + "PipelineEventBox": { "Enum": [ - { - "tag": "Block", - "discriminant": 0 - }, { "tag": "Transaction", - "discriminant": 1 - } - ] - }, - "PipelineEvent": { - "Struct": [ - { - "name": "entity_kind", - "type": "PipelineEntityKind" - }, - { - "name": "status", - 
"type": "PipelineStatus" - }, - { - "name": "hash", - "type": "Hash" - } - ] - }, - "PipelineEventFilter": { - "Struct": [ - { - "name": "entity_kind", - "type": "Option" - }, - { - "name": "status_kind", - "type": "Option" - }, - { - "name": "hash", - "type": "Option" - } - ] - }, - "PipelineRejectionReason": { - "Enum": [ - { - "tag": "Block", "discriminant": 0, - "type": "BlockRejectionReason" + "type": "TransactionEvent" }, { - "tag": "Transaction", + "tag": "Block", "discriminant": 1, - "type": "TransactionRejectionReason" + "type": "BlockEvent" } ] }, - "PipelineStatus": { + "PipelineEventFilterBox": { "Enum": [ { - "tag": "Validating", - "discriminant": 0 + "tag": "Transaction", + "discriminant": 0, + "type": "TransactionEventFilter" }, { - "tag": "Rejected", + "tag": "Block", "discriminant": 1, - "type": "PipelineRejectionReason" - }, - { - "tag": "Committed", - "discriminant": 2 - } - ] - }, - "PipelineStatusKind": { - "Enum": [ - { - "tag": "Validating", - "discriminant": 0 - }, - { - "tag": "Rejected", - "discriminant": 1 - }, - { - "tag": "Committed", - "discriminant": 2 + "type": "BlockEventFilter" } ] }, @@ -3786,6 +3778,38 @@ } ] }, + "TransactionEvent": { + "Struct": [ + { + "name": "hash", + "type": "HashOf" + }, + { + "name": "block_height", + "type": "Option" + }, + { + "name": "status", + "type": "TransactionStatus" + } + ] + }, + "TransactionEventFilter": { + "Struct": [ + { + "name": "hash", + "type": "Option>" + }, + { + "name": "block_height", + "type": "Option>" + }, + { + "name": "status", + "type": "Option" + } + ] + }, "TransactionLimitError": { "Struct": [ { @@ -3812,14 +3836,14 @@ "name": "chain_id", "type": "ChainId" }, - { - "name": "creation_time_ms", - "type": "u64" - }, { "name": "authority", "type": "AccountId" }, + { + "name": "creation_time_ms", + "type": "u64" + }, { "name": "instructions", "type": "Executable" @@ -3876,10 +3900,27 @@ "tag": "WasmExecution", "discriminant": 4, "type": "WasmExecutionFail" + } + ] + }, + "TransactionStatus": { + "Enum": [ + { + "tag": "Queued", + "discriminant": 0 }, { "tag": "Expired", - "discriminant": 5 + "discriminant": 1 + }, + { + "tag": "Approved", + "discriminant": 2 + }, + { + "tag": "Rejected", + "discriminant": 3, + "type": "TransactionRejectionReason" } ] }, @@ -4127,7 +4168,7 @@ { "tag": "Pipeline", "discriminant": 0, - "type": "PipelineEventFilter" + "type": "PipelineEventFilterBox" }, { "tag": "Data", @@ -4295,8 +4336,11 @@ } ] }, - "Vec": { - "Vec": "Event" + "Vec": { + "Vec": "EventBox" + }, + "Vec": { + "Vec": "EventFilterBox" }, "Vec>": { "Vec": "GenericPredicateBox" diff --git a/primitives/src/must_use.rs b/primitives/src/must_use.rs index 40457a2009f..3b6823178d3 100644 --- a/primitives/src/must_use.rs +++ b/primitives/src/must_use.rs @@ -4,7 +4,7 @@ use core::borrow::{Borrow, BorrowMut}; use derive_more::{AsMut, AsRef, Constructor, Deref, Display}; -/// Wrapper type to annotate types with `must_use` attribute +/// Wrapper type to annotate types with `must_use` attribute. 
Only to be used with [`Result`] /// /// # Example /// ``` @@ -30,19 +30,7 @@ use derive_more::{AsMut, AsRef, Constructor, Deref, Display}; /// // is_odd(3).unwrap(); /// ``` #[derive( - Constructor, - Debug, - Display, - Copy, - Clone, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - AsRef, - AsMut, - Deref, + Debug, Display, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Constructor, AsRef, AsMut, Deref, )] #[repr(transparent)] #[must_use] @@ -56,6 +44,12 @@ impl MustUse { } } +impl From for MustUse { + fn from(source: T) -> Self { + MustUse(source) + } +} + impl Borrow for MustUse { #[inline] fn borrow(&self) -> &T { diff --git a/schema/gen/src/lib.rs b/schema/gen/src/lib.rs index ebfb956f436..04d20a776f4 100644 --- a/schema/gen/src/lib.rs +++ b/schema/gen/src/lib.rs @@ -96,13 +96,17 @@ types!( BTreeSet>, BatchedResponse, BatchedResponseV1, + BlockEvent, + BlockEventFilter, BlockHeader, BlockMessage, BlockPayload, BlockRejectionReason, + BlockStatus, BlockSubscriptionRequest, Box>, Box, + Box, Burn, Burn, Burn, @@ -124,7 +128,7 @@ types!( DomainId, DomainOwnerChanged, Duration, - Event, + EventBox, EventMessage, EventSubscriptionRequest, Executable, @@ -232,25 +236,26 @@ types!( Numeric, NumericSpec, Option, + Option, Option, Option, Option, + Option, Option, Option, - Option, - Option>>, Option>, + Option>, Option, Option, Option, + Option>, Option, Option, - Option, - Option, Option, Option, Option, Option, + Option, Option, Option, Parameter, @@ -265,12 +270,8 @@ types!( PermissionToken, PermissionTokenSchema, PermissionTokenSchemaUpdateEvent, - PipelineEntityKind, - PipelineEvent, - PipelineEventFilter, - PipelineRejectionReason, - PipelineStatus, - PipelineStatusKind, + PipelineEventBox, + PipelineEventFilterBox, PredicateBox, PublicKey, QueryBox, @@ -338,12 +339,15 @@ types!( TimeEventFilter, TimeInterval, TimeSchedule, + TransactionEvent, + TransactionEventFilter, TransactionLimitError, TransactionLimits, TransactionPayload, TransactionQueryOutput, TransactionRejectionReason, TransactionValue, + TransactionStatus, Transfer, Transfer, Transfer, @@ -372,7 +376,8 @@ types!( UnregisterBox, Upgrade, ValidationFail, - Vec, + Vec, + Vec, Vec, Vec, Vec, @@ -412,6 +417,7 @@ mod tests { BlockHeader, BlockPayload, SignedBlock, SignedBlockV1, }, domain::NewDomain, + events::pipeline::{BlockEventFilter, TransactionEventFilter}, executor::Executor, ipfs::IpfsPath, isi::{ @@ -435,7 +441,10 @@ mod tests { }, ForwardCursor, QueryOutputBox, }, - transaction::{error::TransactionLimitError, SignedTransactionV1, TransactionLimits}, + transaction::{ + error::TransactionLimitError, SignedTransactionV1, TransactionLimits, + TransactionPayload, + }, BatchedResponse, BatchedResponseV1, Level, }; use iroha_primitives::{ diff --git a/smart_contract/executor/src/default.rs b/smart_contract/executor/src/default.rs index c409a082878..50b951d85a8 100644 --- a/smart_contract/executor/src/default.rs +++ b/smart_contract/executor/src/default.rs @@ -1246,7 +1246,7 @@ pub mod role { let role_id = $isi.object(); let find_role_query_res = match FindRoleByRoleId::new(role_id.clone()).execute() { - Ok(res) => res.into_raw_parts().0, + Ok(res) => res.into_parts().0, Err(error) => { deny!($executor, error); } diff --git a/smart_contract/executor/src/permission.rs b/smart_contract/executor/src/permission.rs index 1bcbb66489f..270048f2140 100644 --- a/smart_contract/executor/src/permission.rs +++ b/smart_contract/executor/src/permission.rs @@ -139,7 +139,7 @@ pub mod asset_definition { ) -> Result { let asset_definition = 
FindAssetDefinitionById::new(asset_definition_id.clone()) .execute() - .map(QueryOutputCursor::into_raw_parts) + .map(QueryOutputCursor::into_parts) .map(|(batch, _cursor)| batch)?; if asset_definition.owned_by() == authority { Ok(true) @@ -226,7 +226,7 @@ pub mod trigger { pub fn is_trigger_owner(trigger_id: &TriggerId, authority: &AccountId) -> Result { let trigger = FindTriggerById::new(trigger_id.clone()) .execute() - .map(QueryOutputCursor::into_raw_parts) + .map(QueryOutputCursor::into_parts) .map(|(batch, _cursor)| batch)?; if trigger.action().authority() == authority { Ok(true) @@ -271,7 +271,7 @@ pub mod domain { pub fn is_domain_owner(domain_id: &DomainId, authority: &AccountId) -> Result { FindDomainById::new(domain_id.clone()) .execute() - .map(QueryOutputCursor::into_raw_parts) + .map(QueryOutputCursor::into_parts) .map(|(batch, _cursor)| batch) .map(|domain| domain.owned_by() == authority) } diff --git a/smart_contract/src/lib.rs b/smart_contract/src/lib.rs index b6663381a2b..576f1915ccb 100644 --- a/smart_contract/src/lib.rs +++ b/smart_contract/src/lib.rs @@ -291,7 +291,7 @@ pub struct QueryOutputCursor { impl QueryOutputCursor { /// Get inner values of batch and cursor, consuming [`Self`]. - pub fn into_raw_parts(self) -> (T, ForwardCursor) { + pub fn into_parts(self) -> (T, ForwardCursor) { (self.batch, self.cursor) } } @@ -524,7 +524,7 @@ mod tests { let response: Result, ValidationFail> = Ok(BatchedResponseV1::new( - QUERY_RESULT.unwrap().into_raw_parts().0, + QUERY_RESULT.unwrap().into_parts().0, ForwardCursor::new(None, None), ) .into()); diff --git a/telemetry/derive/src/lib.rs b/telemetry/derive/src/lib.rs index 471260e9ee3..21f1d5bcbaf 100644 --- a/telemetry/derive/src/lib.rs +++ b/telemetry/derive/src/lib.rs @@ -242,11 +242,16 @@ fn impl_metrics(emitter: &mut Emitter, _specs: &MetricSpecs, func: &syn::ItemFn) quote!( #(#attrs)* #vis #sig { let _closure = || #block; + let start_time = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time"); - let start_time = #_metric_arg_ident.metrics.current_time(); #totals let res = _closure(); - let end_time = #_metric_arg_ident.metrics.current_time(); + let end_time = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Failed to get the current system time"); + #times if let Ok(_) = res { #successes diff --git a/telemetry/src/dev.rs b/telemetry/src/dev.rs index 43055a72339..1ca1511f3a0 100644 --- a/telemetry/src/dev.rs +++ b/telemetry/src/dev.rs @@ -63,7 +63,7 @@ pub async fn start_file_output( async fn write_telemetry(file: &mut File, item: &FuturePollTelemetry) -> Result<()> { let mut json = serde_json::to_string(&item).wrap_err("failed to serialize telemetry to JSON")?; - json.push_str("\n"); + json.push('\n'); file.write_all(json.as_bytes()) .await .wrap_err("failed to write data to the file")?; diff --git a/telemetry/src/metrics.rs b/telemetry/src/metrics.rs index 404e32c3916..7e93b02f94f 100644 --- a/telemetry/src/metrics.rs +++ b/telemetry/src/metrics.rs @@ -1,9 +1,6 @@ //! [`Metrics`] and [`Status`]-related logic and functions. -use std::{ - ops::Deref, - time::{Duration, SystemTime}, -}; +use std::{ops::Deref, time::Duration}; use parity_scale_codec::{Compact, Decode, Encode}; use prometheus::{ @@ -218,17 +215,6 @@ impl Metrics { Encoder::encode(&encoder, &metric_families, &mut buffer)?; Ok(String::from_utf8(buffer)?) } - - /// Get time elapsed since Unix epoch. 
- /// - /// # Panics - /// Never - #[allow(clippy::unused_self)] - pub fn current_time(&self) -> Duration { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("Failed to get the current system time") - } } #[cfg(test)] diff --git a/tools/parity_scale_decoder/Cargo.toml b/tools/parity_scale_decoder/Cargo.toml index 37087501892..909795cff49 100644 --- a/tools/parity_scale_decoder/Cargo.toml +++ b/tools/parity_scale_decoder/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [features] # Disable colour for all program output. # Useful for Docker-based deployment and terminals without colour support. -no-color = ["colored/no-color"] +no_color = ["colored/no-color"] [dependencies] iroha_data_model = { workspace = true, features = ["http"] } diff --git a/tools/parity_scale_decoder/README.md b/tools/parity_scale_decoder/README.md index a8390a6ef67..fc9fe7887f5 100644 --- a/tools/parity_scale_decoder/README.md +++ b/tools/parity_scale_decoder/README.md @@ -13,7 +13,7 @@ cargo build --bin parity_scale_decoder If your terminal does not support colours, run: ```bash -cargo build --features no-color --bin parity_scale_decoder +cargo build --features no_color --bin parity_scale_decoder ``` ## Usage @@ -66,9 +66,9 @@ Decode the data type from a given binary. ``` * If you are not sure which data type is encoded in the binary, run the tool without the `--type` option: - + ```bash - ./target/debug/parity_scale_decoder decode + ./target/debug/parity_scale_decoder decode ``` ### `decode` usage examples diff --git a/tools/parity_scale_decoder/src/main.rs b/tools/parity_scale_decoder/src/main.rs index 9bf3a824693..da17ddd92b9 100644 --- a/tools/parity_scale_decoder/src/main.rs +++ b/tools/parity_scale_decoder/src/main.rs @@ -21,6 +21,7 @@ use iroha_data_model::{ BlockHeader, BlockPayload, SignedBlock, SignedBlockV1, }, domain::NewDomain, + events::pipeline::{BlockEventFilter, TransactionEventFilter}, executor::Executor, ipfs::IpfsPath, isi::{ @@ -44,7 +45,9 @@ use iroha_data_model::{ }, ForwardCursor, QueryOutputBox, }, - transaction::{error::TransactionLimitError, SignedTransactionV1, TransactionLimits}, + transaction::{ + error::TransactionLimitError, SignedTransactionV1, TransactionLimits, TransactionPayload, + }, BatchedResponse, BatchedResponseV1, Level, }; use iroha_primitives::{ diff --git a/torii/src/event.rs b/torii/src/event.rs index 873f81d91ec..ee9d72757b0 100644 --- a/torii/src/event.rs +++ b/torii/src/event.rs @@ -44,7 +44,7 @@ pub type Result = core::result::Result; #[derive(Debug)] pub struct Consumer { stream: WebSocket, - filter: EventFilterBox, + filters: Vec, } impl Consumer { @@ -54,8 +54,8 @@ impl Consumer { /// Can fail due to timeout or without message at websocket or during decoding request #[iroha_futures::telemetry_future] pub async fn new(mut stream: WebSocket) -> Result { - let EventSubscriptionRequest(filter) = stream.recv().await?; - Ok(Consumer { stream, filter }) + let EventSubscriptionRequest(filters) = stream.recv().await?; + Ok(Consumer { stream, filters }) } /// Forwards the `event` over the `stream` if it matches the `filter`. @@ -63,8 +63,8 @@ impl Consumer { /// # Errors /// Can fail due to timeout or sending event. Also receiving might fail #[iroha_futures::telemetry_future] - pub async fn consume(&mut self, event: Event) -> Result<()> { - if !self.filter.matches(&event) { + pub async fn consume(&mut self, event: EventBox) -> Result<()> { + if !self.filters.iter().any(|filter| filter.matches(&event)) { return Ok(()); }
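
Taken together with the client changes, an event subscription now ships a vector of filters, and Torii forwards an event as soon as any of them matches. A client-side sketch using the blocking API (assumes the `iroha_client` crate and an already-configured `Client`):

```rust
use iroha_client::client::Client;
use iroha_data_model::events::pipeline::{BlockEventFilter, BlockStatus};

fn watch_applied_blocks(client: &Client) -> eyre::Result<()> {
    // One subscription, several filters: here a single block filter,
    // but anything convertible into `EventFilterBox` can be passed.
    let mut events = client
        .listen_for_events([BlockEventFilter::default().for_status(BlockStatus::Applied)])?;

    while let Some(event) = events.next() {
        println!("{:?}", event?);
    }
    Ok(())
}
```
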