diff --git a/Cargo.lock b/Cargo.lock
index 6520afb0f7b2e..4b4949f6a884e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -328,12 +328,14 @@ dependencies = [
 name = "chainx-pool"
 version = "0.1.0"
 dependencies = [
+ "chainx-api 0.1.0",
  "chainx-executor 0.1.0",
  "chainx-primitives 0.1.0",
  "chainx-runtime 0.1.0",
  "ed25519 0.1.0 (git+https://github.com/chainx-org/substrate)",
  "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "substrate-client 0.1.0 (git+https://github.com/chainx-org/substrate)",
  "substrate-client-db 0.1.0 (git+https://github.com/chainx-org/substrate)",
  "substrate-codec 0.1.0 (git+https://github.com/chainx-org/substrate)",
@@ -423,6 +425,7 @@ dependencies = [
 name = "chainx-test"
 version = "0.1.0"
 dependencies = [
+ "chainx-api 0.1.0",
  "chainx-pool 0.1.0",
  "chainx-primitives 0.1.0",
  "chainx-runtime 0.1.0",
diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs
index b7aa2bfd9dd90..905f40b03e7e0 100644
--- a/consensus/src/lib.rs
+++ b/consensus/src/lib.rs
@@ -32,29 +32,28 @@ mod evaluation;
 mod service;
 mod error;
 
+use chainx_primitives::{CandidateReceipt, BlockId, Hash, Block, Header, AccountId, BlockNumber, Timestamp, SessionKey};
+use dynamic_inclusion::DynamicInclusion;
 use tokio::timer::{Delay, Interval};
 use std::time::{Duration, Instant};
 use tokio::runtime::TaskExecutor;
+use codec::{Decode, Encode};
+use primitives::AuthorityId;
+use chainx_api::ChainXApi;
 use parking_lot::RwLock;
 use futures::prelude::*;
 use futures::future;
 use std::sync::Arc;
-use codec::{Decode, Encode};
-use primitives::AuthorityId;
-
-use chainx_primitives::{CandidateReceipt, BlockId, Hash, Block, Header, AccountId, BlockNumber, Timestamp, SessionKey};
-use chainx_api::ChainXApi;
-
+type TransactionPool<A> = substrate_extrinsic_pool::Pool<PoolApi<A>>;
 
 pub use self::offline_tracker::OfflineTracker;
-use dynamic_inclusion::DynamicInclusion;
 pub use self::error::{ErrorKind, Error};
 pub use service::Service;
 
-pub type TransactionPool = substrate_extrinsic_pool::Pool<PoolApi>;
 /// Shared offline validator tracker.
 pub type SharedOfflineTracker = Arc<RwLock<OfflineTracker>>;
+
 // block size limit.
 const MAX_TRANSACTIONS_SIZE: usize = 4 * 1024 * 1024;
 
@@ -83,7 +82,7 @@ where
     /// The client instance.
     pub client: Arc<C>,
     /// transaction pool,
-    pub transaction_pool: Arc<TransactionPool>,
+    pub transaction_pool: Arc<TransactionPool<C>>,
     /// The backing network handle.
     pub network: N,
     /// handle to remote task executor
@@ -146,7 +145,9 @@ where
 }
 
 /// The ChainX proposer logic.
-pub struct Proposer<C: ChainXApi + Send + Sync> {
+pub struct Proposer<C> where
+    C: ChainXApi + Send + Sync,
+{
     client: Arc<C>,
     dynamic_inclusion: DynamicInclusion,
     local_key: Arc<ed25519::Pair>,
@@ -154,7 +155,7 @@ pub struct Proposer<C: ChainXApi + Send + Sync> {
     parent_id: BlockId,
     parent_number: BlockNumber,
     random_seed: Hash,
-    transaction_pool: Arc<TransactionPool>,
+    transaction_pool: Arc<TransactionPool<C>>,
     offline: SharedOfflineTracker,
     validators: Vec<AccountId>,
 }
@@ -384,12 +385,13 @@ impl ProposalTiming {
 }
 
 /// Future which resolves upon the creation of a proposal.
-pub struct CreateProposal<C: ChainXApi + Send + Sync> {
+pub struct CreateProposal<C: ChainXApi + Send + Sync> where
+{
     parent_hash: Hash,
     parent_number: BlockNumber,
     parent_id: BlockId,
     client: Arc<C>,
-    transaction_pool: Arc<TransactionPool>,
+    transaction_pool: Arc<TransactionPool<C>>,
     timing: ProposalTiming,
     validators: Vec<AccountId>,
     offline: SharedOfflineTracker,
diff --git a/consensus/src/service.rs b/consensus/src/service.rs
index 5a3065010d32f..ff96c3ed3b56c 100644
--- a/consensus/src/service.rs
+++ b/consensus/src/service.rs
@@ -110,7 +110,7 @@ impl Service {
         client: Arc<C>,
         api: Arc<A>,
         network: N,
-        transaction_pool: Arc<TransactionPool>,
+        transaction_pool: Arc<TransactionPool<A>>,
         thread_pool: ThreadPoolHandle,
         key: ed25519::Pair,
     ) -> Service
diff --git a/pool/Cargo.toml b/pool/Cargo.toml
index ff4db42cb28f5..af7fde8f7d7c8 100644
--- a/pool/Cargo.toml
+++ b/pool/Cargo.toml
@@ -19,9 +19,12 @@ substrate-client-db = { git = "https://github.com/chainx-org/substrate" }
 chainx-primitives = { path = "../primitives" }
 chainx-runtime = { path = "../runtime" }
 chainx-executor = {path = "../executor"}
+chainx-api = { path = "../api" }
 triehash = { version = "0.2.3" }
 error-chain = "0.12"
 hex-literal = "0.1"
+log = "0.3"
+
 [dev-dependencies]
 substrate-keyring = { git = "https://github.com/chainx-org/substrate" }
 substrate-runtime-primitives = { git = "https://github.com/chainx-org/substrate" }
diff --git a/pool/src/error.rs b/pool/src/error.rs
new file mode 100644
index 0000000000000..7eedfde30e561
--- /dev/null
+++ b/pool/src/error.rs
@@ -0,0 +1,58 @@
+use chainx_runtime::{Address, UncheckedExtrinsic};
+use chainx_primitives::Hash;
+use extrinsic_pool;
+use chainx_api;
+
+
+error_chain! {
+    links {
+        Pool(extrinsic_pool::Error, extrinsic_pool::ErrorKind);
+        Api(chainx_api::Error, chainx_api::ErrorKind);
+    }
+    errors {
+        /// Unexpected extrinsic format submitted
+        InvalidExtrinsicFormat {
+            description("Invalid extrinsic format."),
+            display("Invalid extrinsic format."),
+        }
+        /// Attempted to queue an inherent transaction.
+        IsInherent(xt: UncheckedExtrinsic) {
+            description("Inherent transactions cannot be queued."),
+            display("Inherent transactions cannot be queued."),
+        }
+        /// Attempted to queue a transaction with bad signature.
+        BadSignature(e: &'static str) {
+            description("Transaction had bad signature."),
+            display("Transaction had bad signature: {}", e),
+        }
+        /// Attempted to queue a transaction that is already in the pool.
+        AlreadyImported(hash: Hash) {
+            description("Transaction is already in the pool."),
+            display("Transaction {:?} is already in the pool.", hash),
+        }
+        /// Import error.
+        Import(err: Box<::std::error::Error + Send>) {
+            description("Error importing transaction"),
+            display("Error importing transaction: {}", err.description()),
+        }
+        /// Runtime failure.
+        UnrecognisedAddress(who: Address) {
+            description("Unrecognised address in extrinsic"),
+            display("Unrecognised address in extrinsic: {}", who),
+        }
+        /// Extrinsic too large
+        TooLarge(got: usize, max: usize) {
+            description("Extrinsic too large"),
+            display("Extrinsic is too large ({} > {})", got, max),
+        }
+    }
+}
+
+impl extrinsic_pool::IntoPoolError for Error {
+    fn into_pool_error(self) -> ::std::result::Result<extrinsic_pool::Error, Self> {
+        match self {
+            Error(ErrorKind::Pool(e), c) => Ok(extrinsic_pool::Error(e, c)),
+            e => Err(e),
+        }
+    }
+}
diff --git a/pool/src/lib.rs b/pool/src/lib.rs
index c3de160344b83..c523f1c493aba 100644
--- a/pool/src/lib.rs
+++ b/pool/src/lib.rs
@@ -1,19 +1,27 @@
 // Copyright 2018 Chainpool.
-extern crate substrate_codec as codec;
 extern crate substrate_runtime_primitives as runtime_primitives;
 extern crate substrate_primitives as substrate_primitives;
+extern crate substrate_extrinsic_pool as extrinsic_pool;
+extern crate substrate_codec as codec;
 extern crate substrate_client_db;
-extern crate chainx_primitives;
-extern crate chainx_runtime;
-extern crate substrate_network;
-extern crate chainx_executor;
 extern crate substrate_executor;
-extern crate substrate_extrinsic_pool as extrinsic_pool;
+extern crate substrate_network;
 extern crate substrate_client;
+extern crate chainx_primitives;
+extern crate chainx_executor;
+extern crate chainx_runtime;
+extern crate chainx_api;
 extern crate ed25519;
 
+#[macro_use]
+extern crate error_chain;
+#[macro_use]
+extern crate log;
+
 mod pool;
+mod error;
 
 pub use pool::TransactionPool;
 pub use pool::PoolApi;
+
diff --git a/pool/src/pool.rs b/pool/src/pool.rs
index 2db6dc79976f3..b2995bac46dcb 100644
--- a/pool/src/pool.rs
+++ b/pool/src/pool.rs
@@ -1,29 +1,35 @@
 // Copyright 2018 Chainpool.
-use std::{cmp::Ordering, collections::HashMap, sync::Arc};
-
 use extrinsic_pool::{Pool, ChainApi, VerifiedFor, ExtrinsicFor, scoring,
-    Readiness, VerifiedTransaction, Transaction, Error, ErrorKind, Options,
-    scoring::Choice};
-use runtime_primitives::traits::{Hash as HashT, BlakeTwo256};
-use codec::{Encode, Decode};
+    Readiness, VerifiedTransaction, Transaction, Options, scoring::Choice};
+use runtime_primitives::traits::{Hash as HashT, Bounded, Checkable, BlakeTwo256};
+use std::{cmp::Ordering, collections::HashMap, sync::Arc};
+use chainx_primitives::{Block, Hash, BlockId, AccountId, Index};
+use chainx_runtime::{Address, UncheckedExtrinsic};
+use substrate_executor::NativeExecutor;
 use substrate_client::{self, Client};
+use extrinsic_pool::IntoPoolError;
+use codec::{Encode, Decode};
+use chainx_api::ChainXApi;
 use substrate_client_db;
 use substrate_network;
-use substrate_executor::NativeExecutor;
-
-use chainx_primitives::{Block, Hash, BlockId, AccountId};
-use chainx_runtime::UncheckedExtrinsic;
 use chainx_executor;
+use extrinsic_pool;
+
+type CheckedExtrinsic = <UncheckedExtrinsic as Checkable<fn(Address) -> ::std::result::Result<AccountId, &'static str>>>::Checked;
+type Executor = substrate_client::LocalCallExecutor<Backend, NativeExecutor<chainx_executor::Executor>>;
+type Backend = substrate_client_db::Backend<Block>;
+use error::{Error, ErrorKind};
 
-pub type Backend = substrate_client_db::Backend<Block>;
-pub type Executor = substrate_client::LocalCallExecutor<Backend, NativeExecutor<chainx_executor::Executor>>;
+const MAX_TRANSACTION_SIZE: usize = 4 * 1024 * 1024;
 
 #[derive(Debug, Clone)]
 pub struct VerifiedExtrinsic {
-    sender: Hash,
+    inner: Option<CheckedExtrinsic>,
+    sender: Option<AccountId>,
     hash: Hash,
     encoded_size: usize,
+    index: Index,
 }
 
 impl VerifiedExtrinsic {
@@ -36,14 +42,22 @@
         self.encoded_size
     }
 
     /// Get the account ID of the sender of this transaction.
-    pub fn sender(&self) -> Option<Hash> {
-        Some(self.sender)
+    pub fn sender(&self) -> Option<AccountId> {
+        self.sender
+    }
+
+    /// Get the account ID of the sender of this transaction.
+    pub fn index(&self) -> Index {
+        self.index
+    }
+
+    /// Returns `true` if the transaction is not yet fully verified.
+    pub fn is_fully_verified(&self) -> bool {
+        self.inner.is_some()
     }
 }
 
 impl VerifiedTransaction for VerifiedExtrinsic {
     type Hash = Hash;
-    type Sender = Hash;
+    type Sender = Option<AccountId>;
 
     fn hash(&self) -> &Self::Hash {
         &self.hash
@@ -58,83 +72,183 @@ impl VerifiedTransaction for VerifiedExtrinsic {
     }
 }
 
-pub struct PoolApi;
-impl PoolApi {
-    pub fn default() -> Self {
-        PoolApi
-    }
+pub struct PoolApi<A> {
+    api: Arc<A>,
+}
+
+impl<A> PoolApi<A> where
+    A: ChainXApi,
+{
+    const NO_ACCOUNT: &'static str = "Account not found.";
+
+    /// Create a new instance.
+    pub fn new(api: Arc<A>) -> Self {
+        PoolApi {
+            api,
+        }
+    }
+
+    fn lookup(&self, at: &BlockId, address: Address) -> ::std::result::Result<AccountId, &'static str> {
+        // TODO [ToDr] Consider introducing a cache for this.
+        match self.api.lookup(at, address.clone()) {
+            Ok(Some(address)) => Ok(address),
+            Ok(None) => Err(Self::NO_ACCOUNT.into()),
+            Err(e) => {
+                println!("Error looking up address: {:?}: {:?}", address, e);
+                Err("API error.")
+            },
+        }
+    }
 }
 
-impl ChainApi for PoolApi {
-    type Block = Block;
-    type Hash = Hash;
-    type Sender = AccountId;
-    type VEx = VerifiedExtrinsic;
+impl<A> ChainApi for PoolApi<A> where
+    A: ChainXApi + Send + Sync,
+{
     type Ready = HashMap<AccountId, u64>;
+    type Sender = Option<AccountId>;
+    type VEx = VerifiedExtrinsic;
+    type Block = Block;
     type Error = Error;
+    type Hash = Hash;
     type Score = u64;
     type Event = ();
 
     fn verify_transaction(
         &self,
-        _at: &BlockId,
-        uxt: &ExtrinsicFor<Self>,
+        at: &BlockId,
+        xt: &ExtrinsicFor<Self>,
     ) -> Result<Self::VEx, Self::Error> {
-        let encoded = uxt.encode();
-        let (encoded_size, hash) = (uxt.len(), BlakeTwo256::hash(&encoded));
-        Ok(VerifiedExtrinsic{
-            sender: hash,
+
+        let encoded = xt.encode();
+        let uxt = UncheckedExtrinsic::decode(&mut encoded.as_slice()).ok_or_else(|| ErrorKind::InvalidExtrinsicFormat)?;
+
+        if !uxt.is_signed() {
+            bail!(ErrorKind::IsInherent(uxt))
+        }
+
+        let (encoded_size, hash) = (encoded.len(), BlakeTwo256::hash(&encoded));
+        if encoded_size > MAX_TRANSACTION_SIZE {
+            bail!(ErrorKind::TooLarge(encoded_size, MAX_TRANSACTION_SIZE));
+        }
+
+        debug!(target: "transaction-pool", "Transaction submitted: {}", ::substrate_primitives::hexdisplay::HexDisplay::from(&encoded));
+        let inner = match uxt.clone().check_with(|a| self.lookup(at, a)) {
+            Ok(xt) => Some(xt),
+            // keep the transaction around in the future pool and attempt to promote it later.
+            Err(Self::NO_ACCOUNT) => None,
+            Err(e) => bail!(e),
+        };
+        let sender = inner.as_ref().map(|x| x.signed.clone());
+
+        if encoded_size < 1024 {
+            debug!(target: "transaction-pool", "Transaction verified: {} => {:?}", hash, uxt);
+        } else {
+            debug!(target: "transaction-pool", "Transaction verified: {} ({} bytes is too large to display)", hash, encoded_size);
+        }
+
+        Ok(VerifiedExtrinsic {
+            index: uxt.extrinsic.index,
+            inner,
+            sender,
             hash,
             encoded_size,
-        }
-        )
+        })
     }
 
     fn ready(&self) -> Self::Ready {
-
         HashMap::default()
     }
-
     fn is_ready(
         &self,
-        _at: &BlockId,
-        _nonce_cache: &mut Self::Ready,
-        _xt: &VerifiedFor<Self>,
+        at: &BlockId,
+        nonce_cache: &mut Self::Ready,
+        xt: &VerifiedFor<Self>,
     ) -> Readiness {
-        Readiness::Ready
+        let sender = match xt.verified.sender() {
+            Some(sender) => sender,
+            None => return Readiness::Future
+        };
+
+        trace!(target: "transaction-pool", "Checking readiness of {} (from {})", xt.verified.hash, Hash::from(sender));
+        let api = &self.api;
+        let s = api.index(at, sender).ok().unwrap_or_else(Bounded::max_value) as u64;
+        let next_index = nonce_cache.entry(sender).or_insert_with(|| s);
+        let tmp = *next_index as u32;
+        trace!(target: "transaction-pool", "Next index for sender is {}; xt index is {}", next_index, xt.verified.index);
+
+        let result = match xt.verified.index.cmp(&tmp) {
+            Ordering::Greater => Readiness::Future,
+            Ordering::Equal => Readiness::Ready,
+            // TODO [ToDr] Should mark transactions referencing too old blockhash as `Stale` as well.
+            Ordering::Less => Readiness::Stale,
+        };
+
+        // remember to increment `next_index`
+        *next_index = next_index.saturating_add(1);
+        result
     }
 
-    fn compare(_old: &VerifiedFor<Self>, _other: &VerifiedFor<Self>) -> Ordering {
-        Ordering::Equal
+    fn compare(old: &VerifiedFor<Self>, other: &VerifiedFor<Self>) -> Ordering {
+        old.verified.index().cmp(&other.verified.index())
     }
 
-    fn choose(_old: &VerifiedFor<Self>, _new: &VerifiedFor<Self>) -> scoring::Choice {
+    fn choose(old: &VerifiedFor<Self>, new: &VerifiedFor<Self>) -> scoring::Choice {
+        if old.verified.is_fully_verified() {
+            assert!(new.verified.is_fully_verified(), "Scoring::choose called with transactions from different senders");
+            if old.verified.index() == new.verified.index() {
+                return Choice::ReplaceOld;
+            }
+        }
+
+        // This will keep both transactions, even though they have the same indices.
+        // It's fine for not fully verified transactions, we might also allow it for
+        // verified transactions but it would mean that only one of the two is actually valid
+        // (most likely the first to be included in the block).
         Choice::InsertNew
     }
 
     fn update_scores(
-        _xts: &[Transaction<VerifiedFor<Self>>],
-        _scores: &mut [Self::Score],
+        xts: &[Transaction<VerifiedFor<Self>>],
+        scores: &mut [Self::Score],
         _change: scoring::Change<()>,
-    ) {}
+    ) {
+        for i in 0..xts.len() {
+            if !xts[i].verified.is_fully_verified() {
+                scores[i] = 0;
+            } else {
+                // all the same score since there are no fees.
+                // TODO: prioritize things like misbehavior or fishermen reports
+                scores[i] = 1;
+            }
+        }
+    }
 
-    fn should_replace(_old: &VerifiedFor<Self>, _new: &VerifiedFor<Self>) -> scoring::Choice {
-        Choice::InsertNew
+    fn should_replace(old: &VerifiedFor<Self>, _new: &VerifiedFor<Self>) -> scoring::Choice {
+        if old.verified.is_fully_verified() {
+            // Don't allow new transactions if we are reaching the limit.
+            Choice::RejectNew
+        } else {
+            // Always replace not fully verified transactions.
+            Choice::ReplaceOld
+        }
     }
 }
 
-pub struct TransactionPool {
-    inner: Arc<Pool<PoolApi>>,
+pub struct TransactionPool<A> where
+    A: ChainXApi + Send + Sync,
+{
+    inner: Arc<Pool<PoolApi<A>>>,
     client: Arc<Client<Backend, Executor, Block>>,
 }
 
-impl TransactionPool {
+impl<A> TransactionPool<A> where
+    A: ChainXApi + Send + Sync,
+{
     /// Create a new transaction pool.
     pub fn new(
         options: Options,
-        api: PoolApi,
+        api: PoolApi<A>,
         client: Arc<Client<Backend, Executor, Block>>,
     ) -> Self {
         TransactionPool {
@@ -150,13 +264,15 @@ impl TransactionPool {
             .ok()
     }
 
-    pub fn inner(&self) -> Arc<Pool<PoolApi>> {
+    pub fn inner(&self) -> Arc<Pool<PoolApi<A>>> {
         self.inner.clone()
     }
 }
 
-impl substrate_network::TransactionPool for TransactionPool {
-    fn transactions(&self) -> Vec<(Hash, ExtrinsicFor<PoolApi>)> {
+impl<A> substrate_network::TransactionPool for TransactionPool<A> where
+    A: ChainXApi + Send + Sync,
+{
+    fn transactions(&self) -> Vec<(Hash, ExtrinsicFor<PoolApi<A>>)> {
         let best_block_id = match self.best_block_id() {
             Some(id) => id,
             None => return vec![],
@@ -166,43 +282,42 @@ impl substrate_network::TransactionPool for TransactionPool {
                 pending
                     .map(|t| {
                         let hash = t.hash().clone();
-                        let ex: ExtrinsicFor<PoolApi> = t.original.clone();
+                        let ex: ExtrinsicFor<PoolApi<A>> = t.original.clone();
                         (hash, ex)
                     })
                     .collect()
             })
-            .unwrap_or_else(|_e| {
-                //warn!("Error retrieving pending set: {}", e);
+            .unwrap_or_else(|e| {
+                warn!("Error retrieving pending set: {}", e);
                 vec![]
             })
     }
 
-    fn import(&self, transaction: &ExtrinsicFor<PoolApi>) -> Option<Hash> {
-        match UncheckedExtrinsic::decode(&mut &transaction[..]) {
-            Some(_) => {
-                let best_block_id = self.best_block_id()?;
-                match self.inner.submit_one(&best_block_id, transaction.clone()) {
-                    Ok(xt) => Some(*xt.hash()),
+    fn import(&self, transaction: &ExtrinsicFor<PoolApi<A>>) -> Option<Hash> {
+        let encoded = transaction.encode();
+        if let Some(uxt) = Decode::decode(&mut &encoded[..]) {
+            let best_block_id = self.best_block_id()?;
+            match self.inner.submit_one(&best_block_id, uxt) {
+                Ok(xt) => Some(*xt.hash()),
+                Err(e) => match e.into_pool_error() {
+                    Ok(e) => match e.kind() {
+                        extrinsic_pool::ErrorKind::AlreadyImported(hash) =>
+                            Some(::std::str::FromStr::from_str(&hash).map_err(|_| {})
+                                .expect("Hash string is always valid")),
+                        _ => {
+                            debug!("Error adding transaction to the pool: {:?}", e);
+                            None
+                        },
+                    },
                     Err(e) => {
-                        match e.kind() {
-                            ErrorKind::AlreadyImported(hash) => Some(
-                                ::std::str::FromStr::from_str(&hash)
-                                    .map_err(|_| {})
-                                    .expect("Hash string is always valid"),
-                            ),
-                            _ => {
-                                //debug!("Error adding transaction to the pool: {:?}", e);
-                                None
-                            }
-                        }
+                        debug!("Error converting pool error: {:?}", e);
+                        None
                     }
                 }
-            },
-
-            None => {
-                //debug!("Error decoding transaction");
-                None
             }
+        } else {
+            debug!("Error decoding transaction");
+            None
         }
     }
diff --git a/src/main.rs b/src/main.rs
index 88b3cd9f2b1e9..e04dd0ee79d8f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -46,6 +46,7 @@ use substrate_client::BlockchainEvents;
 use chainx_network::consensus::ConsensusNetwork;
 use chainx_pool::{PoolApi, TransactionPool};
 use chainx_primitives::{Block, Hash};
+use chainx_api::TClient;
 use cli::ChainSpec;
 use std::sync::Arc;
 
@@ -94,10 +95,10 @@ fn main() {
     let task_executor = runtime.executor();
 
     let extrinsic_pool = Arc::new(TransactionPool::new(
-        Default::default(),
-        PoolApi::default(),
-        client.clone(),
-    ));
+            Default::default(),
+            PoolApi::new( client.clone() as Arc<TClient> ),
+            client.clone(),
+        ));
 
     let validator_mode = matches.subcommand_matches("validator").is_some();
     let network = network::build_network(
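Note on the readiness rule wired up above: the pool delegates ordering to `PoolApi::is_ready` in pool/src/pool.rs, which compares a transaction's `index` (nonce) against the sender's next expected index, seeded from chain state and cached per block. The following is a minimal standalone sketch of that comparison only — the types (`AccountId`, `Readiness`) are simplified stand-ins, not the extrinsic-pool API:

```rust
use std::cmp::Ordering;
use std::collections::HashMap;

// Simplified stand-ins for the pool's real types.
type AccountId = u64;

#[derive(Debug, PartialEq)]
enum Readiness { Ready, Future, Stale }

/// Decide readiness of a transaction carrying `xt_index`, sent by `sender`,
/// where `chain_index` is the sender's account nonce read from chain state.
fn readiness(
    nonce_cache: &mut HashMap<AccountId, u64>,
    sender: AccountId,
    chain_index: u64,
    xt_index: u64,
) -> Readiness {
    // Seed the cache from chain state the first time a sender is seen.
    let next_index = nonce_cache.entry(sender).or_insert(chain_index);
    let result = match xt_index.cmp(next_index) {
        Ordering::Greater => Readiness::Future, // nonce gap: park it for later
        Ordering::Equal => Readiness::Ready,    // exactly the expected nonce
        Ordering::Less => Readiness::Stale,     // nonce already consumed
    };
    // Mirror the patch: bump the expected index after every check, so the
    // sender's follow-up transaction can become ready in the same pass.
    *next_index = next_index.saturating_add(1);
    result
}

fn main() {
    let mut cache = HashMap::new();
    assert_eq!(readiness(&mut cache, 7, 5, 5), Readiness::Ready);  // expected nonce
    assert_eq!(readiness(&mut cache, 7, 5, 6), Readiness::Ready);  // next in line
    assert_eq!(readiness(&mut cache, 7, 5, 9), Readiness::Future); // gap
    assert_eq!(readiness(&mut cache, 7, 5, 4), Readiness::Stale);  // already used
}
```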
diff --git a/src/rpc.rs b/src/rpc.rs
index 816cbc0b4806c..725133881d6d7 100644
--- a/src/rpc.rs
+++ b/src/rpc.rs
@@ -1,63 +1,55 @@
-// Copyright 2018 chainpool
-use chainx_api::TClient;
+use jsonrpc_http_server::Server as HttpServer;
+use jsonrpc_ws_server::Server as WsServer;
+use chainx_rpc::chainext::ChainExt;
+use rpc_server::apis::chain::Chain;
 use chainx_pool::TransactionPool;
+use tokio::runtime::TaskExecutor;
+use chainx_api::ChainXApi;
+use chainx_api::TClient;
 use chainx_primitives;
 use chainx_rpc;
-use chainx_rpc::chainext::ChainExt;
-use clap;
-use cli;
-use jsonrpc_http_server::Server as HttpServer;
-use jsonrpc_ws_server::Server as WsServer;
 use rpc_server;
-use rpc_server::apis::chain::Chain;
 use std::io;
-use tokio::runtime::TaskExecutor;
 use Arc;
+use clap;
+use cli;
 
-pub fn start(
+pub fn start<A>(
     client: &Arc<TClient>,
     task_executor: &TaskExecutor,
     matches: &clap::ArgMatches,
-    extrinsic_pool: &Arc<TransactionPool>,
-) -> (
-    Result<Option<HttpServer>, io::Error>,
-    Result<Option<WsServer>, io::Error>,
-) {
-    let handler = || {
-        let chain = Chain::new(client.clone(), task_executor.clone());
-        let chain_ext = ChainExt::new(client.clone(), task_executor.clone());
-        let state = rpc_server::apis::state::State::new(client.clone(), task_executor.clone());
-        let author = rpc_server::apis::author::Author::new(
-            client.clone(),
-            extrinsic_pool.inner().clone(),
-            task_executor.clone(),
-        );
-        chainx_rpc::servers::rpc_handler::<
-            chainx_primitives::Block,
-            chainx_primitives::Hash,
-            _,
-            _,
-            _,
-            _,
-            _,
-            _,
-        >(
-            state,
-            chain,
-            chain_ext,
-            author,
-            chainx_rpc::default_rpc_config(),
-        )
-    };
-    let rpc_interface: &str = "127.0.0.1";
-    let ws_interface: &str = "127.0.0.1";
-    let rpc_http_addr = Some(
-        cli::parse_address(&format!("{}:{}", rpc_interface, 8081), "rpc-port", &matches).unwrap(),
-    );
-    let rpc_ws_addr = Some(
-        cli::parse_address(&format!("{}:{}", ws_interface, 8082), "ws-port", &matches).unwrap(),
-    );
+    extrinsic_pool: &Arc<TransactionPool<A>>,
+    ) -> (
+    Result<Option<HttpServer>, io::Error>,
+    Result<Option<WsServer>, io::Error>,
+    ) where
+    A: ChainXApi + Send + Sync + 'static,
+{
+    let handler = || {
+        let chain = rpc_server::apis::chain::Chain::new(client.clone(), task_executor.clone());
+        let state = rpc_server::apis::state::State::new(client.clone(), task_executor.clone());
+        let author = rpc_server::apis::author::Author::new(
+            client.clone(),
+            extrinsic_pool.inner().clone(),
+            task_executor.clone(),
+        );
+        rpc_server::rpc_handler::(
+            state,
+            chain,
+            author,
+            chainx_rpc::default_rpc_config(),
+        )
+    };
+    let rpc_interface: &str = "127.0.0.1";
+    let ws_interface: &str = "127.0.0.1";
+    let rpc_http_addr = Some(
+        cli::parse_address(&format!("{}:{}", rpc_interface, 8081), "rpc-port", &matches).unwrap(),
+    );
+    let rpc_ws_addr = Some(
+        cli::parse_address(&format!("{}:{}", ws_interface, 8082), "ws-port", &matches).unwrap(),
+    );
 
     let rpc_http: Result<Option<HttpServer>, io::Error> =
         chainx_rpc::maybe_start_server(rpc_http_addr, |address| {
diff --git a/test/Cargo.toml b/test/Cargo.toml
index 8ee4fc054cacc..eefc26ea2ec8c 100644
--- a/test/Cargo.toml
+++ b/test/Cargo.toml
@@ -13,3 +13,4 @@ substrate-codec = { git = "https://github.com/chainx-org/substrate" }
 chainx-primitives = { path = "../primitives" }
 chainx-runtime = { path = "../runtime" }
 chainx-pool = { path = "../pool" }
+chainx-api = { path = "../api" }
\ No newline at end of file
diff --git a/test/src/lib.rs b/test/src/lib.rs
index ea0f3b5331ab4..fcf78a3119c81 100644
--- a/test/src/lib.rs
+++ b/test/src/lib.rs
@@ -7,6 +7,10 @@ extern crate substrate_codec;
 extern crate chainx_primitives;
 extern crate chainx_runtime;
 extern crate chainx_pool;
+extern crate chainx_api;
+
+
+use chainx_api::ChainXApi;
 
 use substrate_network::TransactionPool as Pool;
 use substrate_runtime_primitives::MaybeUnsigned;
@@ -40,6 +44,8 @@ fn xt() -> UncheckedExtrinsic {
 }
 
-pub fn push_one_transaction(extrinsic_pool: Arc<TransactionPool>){
+pub fn push_one_transaction<A>(extrinsic_pool: Arc<TransactionPool<A>>) where
+    A: ChainXApi + Send + Sync,
+{
     let _txhash = extrinsic_pool.clone().import(&xt().encode()).unwrap();
 }
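For reference, the scoring hooks added in pool/src/pool.rs encode a simple replacement policy: a fully verified transaction replaces an older one from the same sender only when their indices collide, while not-yet-verified entries score zero and are the first to be evicted under pressure. Below is a standalone sketch of that decision table, using assumed stand-in types (`Vx`, a local `Choice`) rather than the extrinsic-pool traits:

```rust
// Stand-in types; the real code uses extrinsic_pool::scoring::Choice and VerifiedFor<Self>.
#[derive(Debug, PartialEq)]
enum Choice { InsertNew, ReplaceOld, RejectNew }

#[derive(Clone, Copy)]
struct Vx { index: u32, fully_verified: bool }

// Mirrors `choose`: only fully verified duplicates (same sender, same index)
// replace the old entry; everything else is inserted alongside.
fn choose(old: Vx, new: Vx) -> Choice {
    if old.fully_verified && new.fully_verified && old.index == new.index {
        return Choice::ReplaceOld;
    }
    Choice::InsertNew
}

// Mirrors `should_replace` when the pool is at its limit: keep verified entries,
// evict unverified placeholders first.
fn should_replace(old: Vx) -> Choice {
    if old.fully_verified { Choice::RejectNew } else { Choice::ReplaceOld }
}

fn main() {
    let verified = |index| Vx { index, fully_verified: true };
    let pending = |index| Vx { index, fully_verified: false };

    assert_eq!(choose(verified(3), verified(3)), Choice::ReplaceOld); // same nonce: replace
    assert_eq!(choose(verified(3), verified(4)), Choice::InsertNew);  // different nonce: keep both
    assert_eq!(choose(pending(3), pending(3)), Choice::InsertNew);    // unverified: keep both
    assert_eq!(should_replace(verified(3)), Choice::RejectNew);
    assert_eq!(should_replace(pending(3)), Choice::ReplaceOld);
}
```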