diff --git a/Cargo.lock b/Cargo.lock index 53f7d446e..030d59fb9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2854,6 +2854,7 @@ dependencies = [ "pallet-services-payment", "pallet-session", "pallet-staking", + "pallet-stream-payment", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -3987,6 +3988,7 @@ dependencies = [ "pallet-root-testing", "pallet-services-payment", "pallet-session", + "pallet-stream-payment", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -8729,6 +8731,30 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-stream-payment" +version = "0.1.0" +dependencies = [ + "dp-core", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "num-traits", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "serde", + "similar-asserts", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "tap", + "tp-maths", + "tp-traits", +] + [[package]] name = "pallet-sudo" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 77278b158..3a35b80c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ pallet-pooled-staking = { path = "pallets/pooled-staking", default-features = fa pallet-registrar = { path = "pallets/registrar", default-features = false } pallet-registrar-runtime-api = { path = "pallets/registrar/rpc/runtime-api", default-features = false } pallet-services-payment = { path = "pallets/services-payment", default-features = false } +pallet-stream-payment = { path = "pallets/stream-payment", default-features = false } container-chain-template-frontier-runtime = { path = "container-chains/templates/frontier/runtime", default-features = false } container-chain-template-simple-runtime = { path = "container-chains/templates/simple/runtime", default-features = false } @@ -47,6 +48,7 @@ tc-consensus = { path = "client/consensus" } tp-author-noting-inherent = { path = "primitives/author-noting-inherent", default-features = false } tp-consensus = { path = "primitives/consensus", default-features = false } tp-container-chain-genesis-data = { path = "primitives/container-chain-genesis-data", default-features = false } +tp-fungibles-ext = { path = "primitives/fungibles-ext", default-features = false } tp-maths = { path = "primitives/maths", default-features = false } tp-traits = { path = "primitives/traits", default-features = false } @@ -251,6 +253,7 @@ num_enum = { version = "0.7.1", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } serde = { version = "1.0.152", default-features = false } smallvec = "1.10.0" +tap = "1.0.1" # General (client) async-io = "1.3" diff --git a/client/consensus/src/collators.rs b/client/consensus/src/collators.rs index fdb6eb839..725e27939 100644 --- a/client/consensus/src/collators.rs +++ b/client/consensus/src/collators.rs @@ -16,36 +16,36 @@ pub mod basic; -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::ParachainCandidate; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, +use { + crate::{find_pre_digest, AuthorityId, OrchestratorAuraWorkerAuxData}, + cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface, + cumulus_client_consensus_common::ParachainCandidate, + cumulus_client_consensus_proposer::ProposerInterface, + cumulus_primitives_core::{ + relay_chain::Hash as PHash, DigestItem, ParachainBlockData, 
PersistedValidationData, + }, + cumulus_primitives_parachain_inherent::ParachainInherentData, + cumulus_relay_chain_interface::RelayChainInterface, + futures::prelude::*, + nimbus_primitives::{CompatibleDigestItem as NimbusCompatibleDigestItem, NIMBUS_KEY_ID}, + parity_scale_codec::{Codec, Encode}, + polkadot_node_primitives::{Collation, MaybeCompressedPoV}, + polkadot_primitives::Id as ParaId, + sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}, + sp_application_crypto::{AppCrypto, AppPublic}, + sp_consensus::BlockOrigin, + sp_consensus_aura::{digests::CompatibleDigestItem, Slot}, + sp_core::crypto::{ByteArray, Pair}, + sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}, + sp_keystore::{Keystore, KeystorePtr}, + sp_runtime::{ + generic::Digest, + traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero}, + }, + sp_state_machine::StorageChanges, + sp_timestamp::Timestamp, + std::{convert::TryFrom, error::Error, time::Duration}, }; -use cumulus_primitives_parachain_inherent::ParachainInherentData; -use cumulus_relay_chain_interface::RelayChainInterface; -use parity_scale_codec::{Codec, Encode}; - -use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; -use polkadot_primitives::Id as ParaId; - -use crate::{find_pre_digest, AuthorityId, OrchestratorAuraWorkerAuxData}; -use futures::prelude::*; -use nimbus_primitives::{CompatibleDigestItem as NimbusCompatibleDigestItem, NIMBUS_KEY_ID}; -use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; -use sp_application_crypto::{AppCrypto, AppPublic}; -use sp_consensus::BlockOrigin; -use sp_consensus_aura::{digests::CompatibleDigestItem, Slot}; -use sp_core::crypto::{ByteArray, Pair}; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_keystore::{Keystore, KeystorePtr}; -use sp_runtime::{ - generic::Digest, - traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero}, -}; -use sp_state_machine::StorageChanges; -use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. pub struct Params { diff --git a/client/consensus/src/collators/basic.rs b/client/consensus/src/collators/basic.rs index b5999b69b..c51508d78 100644 --- a/client/consensus/src/collators/basic.rs +++ b/client/consensus/src/collators/basic.rs @@ -14,38 +14,39 @@ // You should have received a copy of the GNU General Public License // along with Tanssi. If not, see . 
-use cumulus_client_collator::{ - relay_chain_driven::CollationRequest, service::ServiceInterface as CollatorServiceInterface, +use { + crate::{ + collators as collator_util, consensus_orchestrator::RetrieveAuthoritiesFromOrchestrator, + OrchestratorAuraWorkerAuxData, + }, + cumulus_client_collator::{ + relay_chain_driven::CollationRequest, service::ServiceInterface as CollatorServiceInterface, + }, + cumulus_client_consensus_proposer::ProposerInterface, + cumulus_primitives_core::{ + relay_chain::{BlockId as RBlockId, Hash as PHash}, + PersistedValidationData, + }, + cumulus_relay_chain_interface::RelayChainInterface, + futures::{channel::mpsc::Receiver, prelude::*}, + parity_scale_codec::{Codec, Decode}, + polkadot_node_primitives::CollationResult, + polkadot_overseer::Handle as OverseerHandle, + polkadot_primitives::{CollatorPair, Id as ParaId}, + sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}, + sc_consensus::BlockImport, + sc_consensus_slots::InherentDataProviderExt, + sp_api::ProvideRuntimeApi, + sp_application_crypto::AppPublic, + sp_blockchain::HeaderBackend, + sp_consensus::SyncOracle, + sp_consensus_aura::SlotDuration, + sp_core::crypto::Pair, + sp_inherents::CreateInherentDataProviders, + sp_keystore::KeystorePtr, + sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}, + std::{convert::TryFrom, sync::Arc, time::Duration}, }; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_core::{ - relay_chain::{BlockId as RBlockId, Hash as PHash}, - PersistedValidationData, -}; -use cumulus_relay_chain_interface::RelayChainInterface; -use parity_scale_codec::{Codec, Decode}; - -use polkadot_node_primitives::CollationResult; -use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId}; - -use futures::{channel::mpsc::Receiver, prelude::*}; -use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; -use sc_consensus::BlockImport; -use sc_consensus_slots::InherentDataProviderExt; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppPublic; -use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; -use sp_consensus_aura::SlotDuration; -use sp_core::crypto::Pair; -use sp_inherents::CreateInherentDataProviders; -use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; -use std::{convert::TryFrom, sync::Arc, time::Duration}; - -use crate::consensus_orchestrator::RetrieveAuthoritiesFromOrchestrator; -use crate::{collators as collator_util, OrchestratorAuraWorkerAuxData}; /// Parameters for [`run`]. pub struct Params { diff --git a/client/consensus/src/consensus_orchestrator.rs b/client/consensus/src/consensus_orchestrator.rs index 3e22dc01e..1999317eb 100644 --- a/client/consensus/src/consensus_orchestrator.rs +++ b/client/consensus/src/consensus_orchestrator.rs @@ -21,9 +21,7 @@ //! the ParachainConsensus trait to access the orchestrator-dicated authorities, and further //! it implements the TanssiWorker to TanssiOnSlot trait. This trait is use { - crate::AuthorityId, - crate::Pair, - crate::Slot, + crate::{AuthorityId, Pair, Slot}, sc_consensus_slots::{SimpleSlotWorker, SlotInfo, SlotResult}, sp_consensus::Proposer, sp_runtime::traits::Block as BlockT, diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs index cd8257b32..ead2e712e 100644 --- a/client/consensus/src/lib.rs +++ b/client/consensus/src/lib.rs @@ -20,18 +20,15 @@ //! 
slot_author returns the author based on the slot number and authorities provided (aura-like) //! authorities retrieves the current set of authorities based on the first eligible key found in the keystore -use {sp_consensus_slots::Slot, sp_core::crypto::Pair}; - pub mod collators; mod consensus_orchestrator; mod manual_seal; + #[cfg(test)] mod tests; -pub use crate::consensus_orchestrator::OrchestratorAuraWorkerAuxData; -pub use sc_consensus_aura::CompatibilityMode; - pub use { + crate::consensus_orchestrator::OrchestratorAuraWorkerAuxData, cumulus_primitives_core::ParaId, manual_seal::{ get_aura_id_from_seed, ContainerManualSealAuraConsensusDataProvider, @@ -39,8 +36,10 @@ pub use { }, pallet_registrar_runtime_api::OnDemandBlockProductionApi, parity_scale_codec::{Decode, Encode}, - sc_consensus_aura::find_pre_digest, - sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}, + sc_consensus_aura::{ + find_pre_digest, slot_duration, AuraVerifier, BuildAuraWorkerParams, CompatibilityMode, + SlotProportion, + }, sc_consensus_slots::InherentDataProviderExt, sp_api::{Core, ProvideRuntimeApi}, sp_application_crypto::AppPublic, @@ -51,6 +50,9 @@ pub use { std::hash::Hash, tp_consensus::TanssiAuthorityAssignmentApi, }; + +use {sp_consensus_slots::Slot, sp_core::crypto::Pair}; + const LOG_TARGET: &str = "aura::tanssi"; type AuthorityId
<P> = <P as Pair>
::Public; diff --git a/pallets/collator-assignment/src/assignment.rs b/pallets/collator-assignment/src/assignment.rs index fb17885b6..2c8734cf5 100644 --- a/pallets/collator-assignment/src/assignment.rs +++ b/pallets/collator-assignment/src/assignment.rs @@ -20,12 +20,16 @@ use { cmp, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, marker::PhantomData, - mem, vec, + mem, vec::Vec, }, tp_traits::{ParaId, RemoveInvulnerables as RemoveInvulnerablesT}, }; +// Separate import of `sp_std::vec!` macro, which cause issues with rustfmt if grouped +// with `sp_std::vec::Vec`. +use sp_std::vec; + /// Helper methods to implement collator assignment algorithm pub struct Assignment(PhantomData); diff --git a/pallets/registrar/src/lib.rs b/pallets/registrar/src/lib.rs index 9bc52c405..52b07b0d6 100644 --- a/pallets/registrar/src/lib.rs +++ b/pallets/registrar/src/lib.rs @@ -59,8 +59,7 @@ use { #[frame_support::pallet] pub mod pallet { - use super::*; - use tp_traits::SessionContainerChains; + use {super::*, tp_traits::SessionContainerChains}; #[pallet::pallet] #[pallet::without_storage_info] diff --git a/pallets/stream-payment/Cargo.toml b/pallets/stream-payment/Cargo.toml new file mode 100644 index 000000000..8ea4e83ad --- /dev/null +++ b/pallets/stream-payment/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "pallet-stream-payment" +authors = { workspace = true } +description = "Stream payment pallet" +edition = "2021" +license = "GPL-3.0-only" +version = "0.1.0" + +[package.metadata.docs.rs] +targets = [ "x86_64-unknown-linux-gnu" ] + +[dependencies] +log = { workspace = true } +serde = { workspace = true, optional = true } + +dp-core = { workspace = true } +tp-maths = { workspace = true } +tp-traits = { workspace = true } + +# Substrate +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[dev-dependencies] +num-traits = { workspace = true } +pallet-balances = { workspace = true, features = [ "std" ] } +similar-asserts = { workspace = true } +sp-io = { workspace = true, features = [ "std" ] } +tap = { workspace = true } + +[features] +default = [ "std" ] +std = [ + "dp-core/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-balances/std", + "parity-scale-codec/std", + "scale-info/std", + "serde", + "serde?/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "tp-maths/std", + "tp-traits/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "tp-maths/runtime-benchmarks", + "tp-traits/runtime-benchmarks", +] diff --git a/pallets/stream-payment/README.md b/pallets/stream-payment/README.md new file mode 100644 index 000000000..c85fd904f --- /dev/null +++ b/pallets/stream-payment/README.md @@ -0,0 +1,57 @@ +# Stream payment pallet + +A pallet to create payment streams, where users can setup recurrent payment at some rate per unit of +time. 
The pallet aims to be configurable and usage agnostic:
+- The runtime configures which assets are supported by providing an `AssetId` type and a type
+  implementing the `Assets` trait, which only requires the functions needed by the pallet
+  (increasing the deposit when creating or refilling a stream, decreasing it when closing a stream,
+  and transferring it when the stream payment is performed). Both types make it easy to add new
+  supported assets in the future while remaining backward-compatible. The pallet makes few
+  assumptions about how the funds are deposited (thanks to the custom trait), which should make it
+  easy to support assets from various pallets/sources.
+- The runtime configures which units of time can be used to express the rate of payment. Units of
+  time should be monotonically increasing. Users can then choose which unit of time they want to use.
+
+The pallet provides the following calls:
+- `open_stream(target, time_unit, asset_id, rate, initial_deposit)`: The origin creates a stream
+  towards a target (payee), with the given time unit, asset and rate. A deposit is made, which is
+  able to pay for `initial_deposit / rate` units of time. Streams are indexed by a `StreamId`,
+  which is returned in an event.
+- `perform_payment(stream_id)`: Can be called by anyone to update a stream, performing the payment
+  for the time elapsed since the last update. All other calls implicitly call `perform_payment`,
+  such that at any point in time you are guaranteed to be able to redeem the payment for the
+  elapsed time; this allows calling it only when the funds are needed, without fear of non-payment.
+- `close_stream(stream_id)`: Only callable by the source or target of the stream. It pays for the
+  elapsed time, then refunds the remaining deposit to the source.
+- `immediately_change_deposit(stream_id, asset_id, change)`: Changes the deposit of the stream. It
+  first performs a payment before applying the change, which means a source will not retroactively
+  pay for a drained stream. A target that provides services in exchange for payment should suspend
+  the service as soon as updating the stream would drain it, and should resume the service once the
+  stream is refilled. The call takes an asset id which must match the config asset id, which
+  prevents depositing an unintended amount if a change request switching the asset is accepted in
+  the meantime.
+- `request_change(stream_id, kind, new_config, deposit_change)`: Requests a change to the config of
+  the stream. `kind` states whether the change is a mere suggestion or is mandatory, in which case
+  a deadline is provided after which payments no longer occur. A request that keeps the time unit
+  and asset id and changes the rate at a disadvantage to the caller is applied immediately. An
+  existing request can be overwritten by either party if it was a suggestion, but only by the
+  previous requester if it was mandatory. A nonce is increased to prevent front-running the
+  acceptance of a request with another request. The target of the stream cannot provide a deposit
+  change, while the source can; providing an absolute deposit change is however mandatory when
+  changing the asset.
+- `accept_requested_change(stream_id, request_nonce, deposit_change)`: Accepts the change for this
+  stream id and request nonce. To refuse a change, one can either leave it as is (which does
+  nothing if the request is a suggestion, or stops payments once the deadline is reached if it is
+  mandatory) or close the stream with `close_stream`. The target of the stream cannot provide a
+  deposit change, while the source can; providing an absolute deposit change is however mandatory
+  when changing the asset.
+- `cancel_change_request(stream_id)`: Cancels a change request; only callable by the requester of a
+  previous request.
+
+For UIs the pallet provides the following storages:
+- `Streams: StreamId => Stream`: Stream data indexed by stream id.
+- `LookupStreamsWithSource: AccountId => StreamId => ()`: Allows listing all the streams with a
+  given source by iterating over all storage keys with the key prefix corresponding to the account.
+- `LookupStreamsWithTarget: AccountId => StreamId => ()`: Same, but for the target. These last two
+  storages exist solely for UIs to list incoming and outgoing streams. Using a key prefix keeps the
+  PoV cost lower than storing a single `Vec` of `StreamId`s would.
\ No newline at end of file
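To illustrate the two configuration points above, here is a minimal sketch of what a runtime-side time provider might look like. It is an illustration only, not part of this pallet or PR: the `TimeUnit` enum, the `RuntimeTimeProvider` struct, the `u128` balance type and the trait's generic parameter order are assumptions; only the `TimeProvider` trait itself comes from the pallet, and its `runtime-benchmarks` helper methods are omitted here.

```rust
// Illustrative sketch: a time provider that can measure time either in block
// numbers or in `pallet_timestamp` milliseconds. All names here are assumed.
use core::marker::PhantomData;
use frame_system::pallet_prelude::BlockNumberFor;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;

/// Units of time a stream rate can be expressed in.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen)]
pub enum TimeUnit {
    /// The rate is an amount per block.
    BlockNumber,
    /// The rate is an amount per millisecond.
    Timestamp,
}

/// Reads the current "time" from the runtime for the requested unit.
pub struct RuntimeTimeProvider<Runtime>(PhantomData<Runtime>);

impl<Runtime> pallet_stream_payment::TimeProvider<TimeUnit, u128> for RuntimeTimeProvider<Runtime>
where
    Runtime: frame_system::Config + pallet_timestamp::Config,
    BlockNumberFor<Runtime>: Into<u128>,
    <Runtime as pallet_timestamp::Config>::Moment: Into<u128>,
{
    fn now(unit: &TimeUnit) -> Option<u128> {
        match unit {
            // Both sources are monotonically increasing, as the pallet requires.
            TimeUnit::BlockNumber => Some(frame_system::Pallet::<Runtime>::block_number().into()),
            TimeUnit::Timestamp => Some(pallet_timestamp::Pallet::<Runtime>::get().into()),
        }
    }
}
```

With such types, the runtime's `Config` for this pallet would point `TimeUnit` at an enum like the one above and `TimeProvider` at `RuntimeTimeProvider<Runtime>`; the `Assets` trait would be wired up similarly over the runtime's currency or assets pallet.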
diff --git a/pallets/stream-payment/src/benchmarking.rs b/pallets/stream-payment/src/benchmarking.rs new file mode 100644 index 000000000..9d86be2b2 --- /dev/null +++ b/pallets/stream-payment/src/benchmarking.rs @@ -0,0 +1,437 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + +use { + crate::{ + Assets, Call, ChangeKind, Config, DepositChange, Event, Pallet, Party, StreamConfig, + Streams, TimeProvider, + }, + frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}, + frame_support::{assert_ok, dispatch::RawOrigin}, + frame_system::EventRecord, + sp_std::vec, +}; + +/// Create a funded user. +fn create_funded_user( + string: &'static str, + n: u32, + asset_id: &T::AssetId, + // amount: T::Balance, +) -> T::AccountId { + const SEED: u32 = 0; + let user = account(string, n, SEED); + + // create a large amount that should be greater than ED + let amount: T::Balance = 1_000_000_000u32.into(); + let amount: T::Balance = amount * T::Balance::from(1_000_000_000u32); + T::Assets::bench_set_balance(asset_id, &user, amount); + user +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn open_stream() -> Result<(), BenchmarkError> { + let asset_id = T::Assets::bench_worst_case_asset_id(); + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + #[extrinsic_call] + _( + RawOrigin::Signed(source.clone()), + target, + StreamConfig { + time_unit, + asset_id, + rate: 100u32.into(), + }, + 1_000_000u32.into(), + ); + + assert_last_event::( + Event::StreamOpened { + stream_id: 0u32.into(), + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn close_stream() -> Result<(), BenchmarkError> { + // Worst case is closing a stream with a pending payment. + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target, + StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }, + initial_deposit, + )); + + // Change time to trigger payment. + let now = T::TimeProvider::now(&time_unit).expect("can fetch time"); + let delta: T::Balance = 10u32.into(); + T::TimeProvider::bench_set_now(now + delta); + + #[extrinsic_call] + _(RawOrigin::Signed(source.clone()), 0u32.into()); + + assert_last_event::( + Event::StreamClosed { + stream_id: 0u32.into(), + refunded: initial_deposit - (rate * delta), + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn perform_payment() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target.clone(), + StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }, + initial_deposit, + )); + + // Change time to trigger payment. 
+ let now = T::TimeProvider::now(&time_unit).expect("can fetch time"); + let delta: T::Balance = 10u32.into(); + T::TimeProvider::bench_set_now(now + delta); + + #[extrinsic_call] + _(RawOrigin::Signed(source.clone()), 0u32.into()); + + assert_last_event::( + Event::StreamPayment { + stream_id: 0u32.into(), + source, + target, + amount: rate * delta, + drained: false, + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn request_change_immediate() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + let config = StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }; + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target, + config.clone(), + initial_deposit, + )); + + let new_config = StreamConfig { + rate: 101u32.into(), + ..config.clone() + }; + + #[extrinsic_call] + Pallet::::request_change( + RawOrigin::Signed(source.clone()), + 0u32.into(), + ChangeKind::Suggestion, + new_config.clone(), + Some(DepositChange::Increase(1_000u32.into())), + ); + + assert_last_event::( + Event::StreamConfigChanged { + stream_id: 0u32.into(), + old_config: config, + new_config: new_config, + deposit_change: Some(DepositChange::Increase(1_000u32.into())), + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn request_change_delayed() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + let asset_id2 = T::Assets::bench_worst_case_asset_id2(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + let config = StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }; + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target, + config.clone(), + initial_deposit, + )); + + // Change the asset id. In the case asset_id == asset_id2, we decrease the rate so that + // the request is not executed immediately. 
+ let new_config = StreamConfig { + asset_id: asset_id2, + rate: 99u32.into(), + ..config.clone() + }; + + let stream_id = 0u32.into(); + + #[extrinsic_call] + Pallet::::request_change( + RawOrigin::Signed(source.clone()), + stream_id, + ChangeKind::Suggestion, + new_config.clone(), + Some(DepositChange::Absolute(500u32.into())), + ); + + assert_last_event::( + Event::StreamConfigChangeRequested { + stream_id, + request_nonce: 1, + requester: Party::Source, + old_config: config, + new_config, + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn accept_requested_change() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + let asset_id2 = T::Assets::bench_worst_case_asset_id2(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + let config = StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }; + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target.clone(), + config.clone(), + initial_deposit, + )); + + // Change the asset id. In the case asset_id == asset_id2, we decrease the rate so that + // the request is not executed immediately. + let new_config = StreamConfig { + asset_id: asset_id2, + rate: 99u32.into(), + ..config.clone() + }; + + assert_ok!(Pallet::::request_change( + RawOrigin::Signed(source.clone()).into(), + 0u32.into(), + ChangeKind::Suggestion, + new_config.clone(), + Some(DepositChange::Absolute(500u32.into())), + )); + + #[extrinsic_call] + _(RawOrigin::Signed(target.clone()), 0u32.into(), 1, None); + + assert_last_event::( + Event::StreamConfigChanged { + stream_id: 0u32.into(), + old_config: config, + new_config, + deposit_change: Some(DepositChange::Absolute(500u32.into())), + } + .into(), + ); + + Ok(()) + } + + #[benchmark] + fn cancel_change_request() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + let asset_id2 = T::Assets::bench_worst_case_asset_id2(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + let config = StreamConfig { + time_unit: time_unit.clone(), + asset_id, + rate, + }; + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target.clone(), + config.clone(), + initial_deposit, + )); + + // Change the asset id. In the case asset_id == asset_id2, we decrease the rate so that + // the request is not executed immediately. 
+ let new_config = StreamConfig { + asset_id: asset_id2, + rate: 99u32.into(), + ..config.clone() + }; + + assert_ok!(Pallet::::request_change( + RawOrigin::Signed(source.clone()).into(), + 0u32.into(), + ChangeKind::Suggestion, + new_config.clone(), + Some(DepositChange::Absolute(500u32.into())), + )); + + #[extrinsic_call] + _(RawOrigin::Signed(source), 0u32.into()); + + let stream_id: T::StreamId = 0u32.into(); + assert!(Streams::::get(stream_id) + .expect("to be a stream") + .pending_request + .is_none()); + + Ok(()) + } + + #[benchmark] + fn immediately_change_deposit() -> Result<(), BenchmarkError> { + let time_unit = T::TimeProvider::bench_worst_case_time_unit(); + let asset_id = T::Assets::bench_worst_case_asset_id(); + + let source = create_funded_user::("source", 1, &asset_id); + let target = create_funded_user::("target", 2, &asset_id); + + let rate = 100u32.into(); + let initial_deposit = 1_000_000u32.into(); + let config = StreamConfig { + time_unit: time_unit.clone(), + asset_id: asset_id.clone(), + rate, + }; + + assert_ok!(Pallet::::open_stream( + RawOrigin::Signed(source.clone()).into(), + target.clone(), + config.clone(), + initial_deposit, + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(source), + 0u32.into(), + asset_id, + DepositChange::Absolute(500u32.into()), + ); + + assert_last_event::( + Event::StreamConfigChanged { + stream_id: 0u32.into(), + old_config: config.clone(), + new_config: config, + deposit_change: Some(DepositChange::Absolute(500u32.into())), + } + .into(), + ); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::default().build(), + crate::mock::Runtime, + ); +} diff --git a/pallets/stream-payment/src/lib.rs b/pallets/stream-payment/src/lib.rs new file mode 100644 index 000000000..9a6f01530 --- /dev/null +++ b/pallets/stream-payment/src/lib.rs @@ -0,0 +1,938 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub mod weights; + +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +use { + crate::weights::WeightInfo, + core::cmp::min, + frame_support::{ + dispatch::DispatchErrorWithPostInfo, + pallet, + pallet_prelude::*, + storage::types::{StorageDoubleMap, StorageMap}, + traits::tokens::Balance, + Blake2_128Concat, + }, + frame_system::pallet_prelude::*, + parity_scale_codec::{FullCodec, MaxEncodedLen}, + scale_info::TypeInfo, + sp_runtime::{ + traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedSub, One, Saturating, Zero}, + ArithmeticError, + }, + sp_std::{fmt::Debug, marker::PhantomData}, +}; + +pub use pallet::*; + +/// Type able to provide the current time for given unit. +/// For each unit the returned number should monotonically increase and not +/// overflow. 
+pub trait TimeProvider { + fn now(unit: &Unit) -> Option; + + /// Benchmarks: should return the time unit which has the worst performance calling + /// `TimeProvider::now(unit)` with. + #[cfg(feature = "runtime-benchmarks")] + fn bench_worst_case_time_unit() -> Unit; + + /// Benchmarks: sets the "now" time for time unit returned by `bench_worst_case_time_unit`. + #[cfg(feature = "runtime-benchmarks")] + fn bench_set_now(instant: Number); +} + +/// Interactions the pallet needs with assets. +pub trait Assets { + /// Transfer assets deposited by an account to another account. + /// Those assets should not be considered deposited in the target account. + fn transfer_deposit( + asset_id: &AssetId, + from: &AccountId, + to: &AccountId, + amount: Balance, + ) -> DispatchResult; + + /// Increase the deposit for an account and asset id. Should fail if account doesn't have + /// enough of that asset. Funds should be safe and not slashable. + fn increase_deposit(asset_id: &AssetId, account: &AccountId, amount: Balance) + -> DispatchResult; + + /// Decrease the deposit for an account and asset id. Should fail on underflow. + fn decrease_deposit(asset_id: &AssetId, account: &AccountId, amount: Balance) + -> DispatchResult; + + /// Return the deposit for given asset and account. + fn get_deposit(asset_id: &AssetId, account: &AccountId) -> Balance; + + /// Benchmarks: should return the asset id which has the worst performance when interacting + /// with it. + #[cfg(feature = "runtime-benchmarks")] + fn bench_worst_case_asset_id() -> AssetId; + + /// Benchmarks: should return the another asset id which has the worst performance when interacting + /// with it afther `bench_worst_case_asset_id`. This is to benchmark the worst case when changing config + /// from one asset to another. If there is only one asset id it is fine to return it in both + /// `bench_worst_case_asset_id` and `bench_worst_case_asset_id2`. + #[cfg(feature = "runtime-benchmarks")] + fn bench_worst_case_asset_id2() -> AssetId; + + /// Benchmarks: should set the balance. + #[cfg(feature = "runtime-benchmarks")] + fn bench_set_balance(asset_id: &AssetId, account: &AccountId, amount: Balance); +} + +#[pallet] +pub mod pallet { + use super::*; + + /// Pooled Staking pallet. + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(PhantomData); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Overarching event type + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Type used to represent stream ids. Should be large enough to not overflow. + type StreamId: AtLeast32BitUnsigned + + Default + + Debug + + Copy + + Clone + + FullCodec + + TypeInfo + + MaxEncodedLen; + + /// The balance type, which is also the type representing time (as this + /// pallet will do math with both time and balances to compute how + /// much should be paid). + type Balance: Balance; + + /// Type representing an asset id, a identifier allowing distinguishing assets. + type AssetId: Debug + Clone + FullCodec + TypeInfo + MaxEncodedLen + PartialEq + Eq; + + /// Provide interaction with assets. + type Assets: Assets; + + /// Represents which units of time can be used. Designed to be an enum + /// with a variant for each kind of time source/scale supported. + type TimeUnit: Debug + Clone + FullCodec + TypeInfo + MaxEncodedLen + Eq; + + /// Provide the current time in given unit. 
+ type TimeProvider: TimeProvider; + + type WeightInfo: weights::WeightInfo; + } + + type AccountIdOf = ::AccountId; + type AssetIdOf = ::AssetId; + + pub type RequestNonce = u32; + + /// A stream payment from source to target. + /// Stores the last time the stream was updated, which allows to compute + /// elapsed time and perform payment. + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[derive(RuntimeDebug, PartialEq, Eq, Encode, Decode, Clone, TypeInfo)] + pub struct Stream { + /// Payer, source of the stream. + pub source: AccountId, + /// Payee, target of the stream. + pub target: AccountId, + /// Steam config (time unit, asset id, rate) + pub config: StreamConfig, + /// How much is deposited to fund this stream. + pub deposit: Balance, + /// Last time the stream was updated in `config.time_unit`. + pub last_time_updated: Balance, + /// Nonce for requests. This prevents a request to make a first request + /// then change it to another request to frontrun the other party + /// accepting. + pub request_nonce: RequestNonce, + /// A pending change request if any. + pub pending_request: Option>, + } + + impl Stream { + pub fn account_to_party(&self, account: AccountId) -> Option { + match account { + a if a == self.source => Some(Party::Source), + a if a == self.target => Some(Party::Target), + _ => None, + } + } + } + + /// Stream configuration. + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[derive(RuntimeDebug, PartialEq, Eq, Encode, Decode, Copy, Clone, TypeInfo)] + pub struct StreamConfig { + /// Unit in which time is measured using a `TimeProvider`. + pub time_unit: Unit, + /// Asset used for payment. + pub asset_id: AssetId, + /// Amount of asset / unit. + pub rate: Balance, + } + + /// Origin of a change request. + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[derive(RuntimeDebug, PartialEq, Eq, Encode, Decode, Copy, Clone, TypeInfo)] + pub enum Party { + Source, + Target, + } + + impl Party { + pub fn inverse(self) -> Self { + match self { + Party::Source => Party::Target, + Party::Target => Party::Source, + } + } + } + + /// Kind of change requested. + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[derive(RuntimeDebug, PartialEq, Eq, Encode, Decode, Copy, Clone, TypeInfo)] + pub enum ChangeKind