diff --git a/mm2src/adex_cli/Cargo.lock b/mm2src/adex_cli/Cargo.lock index 4de83cbd97..5d5eb5abeb 100644 --- a/mm2src/adex_cli/Cargo.lock +++ b/mm2src/adex_cli/Cargo.lock @@ -21,7 +21,7 @@ dependencies = [ "common", "derive_more", "directories", - "env_logger 0.7.1", + "env_logger", "gstuff", "http 0.2.9", "hyper", @@ -29,6 +29,7 @@ dependencies = [ "inquire", "itertools", "log", + "mm2_core", "mm2_net", "mm2_number", "mm2_rpc", @@ -285,7 +286,7 @@ checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -626,7 +627,7 @@ dependencies = [ "chrono", "crossbeam", "derive_more", - "env_logger 0.9.3", + "env_logger", "findshlibs", "fnv", "futures 0.1.31", @@ -857,7 +858,7 @@ dependencies = [ "proc-macro2", "quote 1.0.27", "scratch", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -874,7 +875,7 @@ checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -900,7 +901,7 @@ checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -967,19 +968,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.9.3" @@ -987,7 +975,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", - "humantime 2.1.0", + "humantime", "log", "regex", "termcolor", @@ -1488,15 +1476,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "humantime" version = "2.1.0" @@ -1603,7 +1582,7 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2244,7 +2223,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2466,7 +2445,7 @@ dependencies = [ "itertools", "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2485,12 +2464,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "0.3.15" @@ -3135,7 +3108,7 @@ dependencies = [ "proc-macro2", "quote 1.0.27", "ser_error", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3189,7 +3162,7 @@ checksum = "2dc6b7951b17b051f3210b063f12cc17320e2fe30ae05b0fe2a3abb068551c76" dependencies = [ "proc-macro2", 
"quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3392,9 +3365,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.95" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote 1.0.27", @@ -3475,7 +3448,7 @@ checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3576,7 +3549,7 @@ checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] diff --git a/mm2src/adex_cli/Cargo.toml b/mm2src/adex_cli/Cargo.toml index d2b38a4cba..cb477cacb0 100644 --- a/mm2src/adex_cli/Cargo.toml +++ b/mm2src/adex_cli/Cargo.toml @@ -7,23 +7,24 @@ description = "Provides a CLI interface and facilitates interoperating to komodo # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -anyhow = { version = "=1.0.42", features = ["std"] } -async-trait = "=0.1.52" +anyhow = { version = "1.0", features = ["std"] } +async-trait = "0.1" clap = { version = "4.2", features = ["derive"] } common = { path = "../common" } derive_more = "0.99" directories = "5.0" -env_logger = "0.7.1" +env_logger = "0.9.3" http = "0.2" hyper = { version = "0.14.26", features = ["client", "http2", "tcp"] } -hyper-rustls = "0.24.0" -gstuff = { version = "=0.7.4" , features = [ "nightly" ]} +hyper-rustls = "0.24" +gstuff = { version = "0.7" , features = [ "nightly" ]} inquire = "0.6" itertools = "0.10" log = "0.4.21" mm2_net = { path = "../mm2_net" } mm2_number = { path = "../mm2_number" } mm2_rpc = { path = "../mm2_rpc"} +mm2_core = { path = "../mm2_core" } passwords = "3.1" rpc = { path = "../mm2_bitcoin/rpc" } rustls = { version = "0.21", features = [ "dangerous_configuration" ] } @@ -31,8 +32,8 @@ serde = "1.0" serde_json = { version = "1", features = ["preserve_order", "raw_value"] } sysinfo = "0.28" tiny-bip39 = "0.8.0" -tokio = { version = "=1.25.0", features = [ "macros" ] } -uuid = { version = "=1.2.2", features = ["fast-rng", "serde", "v4"] } +tokio = { version = "1.20.0", features = [ "macros" ] } +uuid = { version = "1.2.2", features = ["fast-rng", "serde", "v4"] } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3.3", features = ["processthreadsapi", "winnt"] } diff --git a/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs b/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs index 33d4fbfb62..24c108c6da 100644 --- a/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs +++ b/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, bail, Result}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use mm2_rpc::data::legacy::{BalanceResponse, CoinInitResponse, GetEnabledResponse, Mm2RpcResult, MmVersionResponse, OrderbookRequest, OrderbookResponse, SellBuyRequest, SellBuyResponse, Status}; use serde_json::{json, Value as Json}; @@ -38,7 +38,7 @@ impl AdexProc<'_, '_, let activation_scheme = get_activation_scheme()?; let activation_method = activation_scheme.get_activation_method(asset)?; - + debug!("Got activation scheme for the coin: {}, {:?}", asset, activation_method); let enable = Command::builder() 
.flatten_data(activation_method) .userpass(self.get_rpc_password()?) diff --git a/mm2src/adex_cli/src/rpc_data.rs b/mm2src/adex_cli/src/rpc_data.rs index a3146cbe47..2c634759ef 100644 --- a/mm2src/adex_cli/src/rpc_data.rs +++ b/mm2src/adex_cli/src/rpc_data.rs @@ -40,6 +40,10 @@ pub(crate) struct ElectrumRequest { #[serde(skip_serializing_if = "Vec::is_empty")] pub(super) servers: Vec, #[serde(skip_serializing_if = "Option::is_none")] + min_connected: Option, + #[serde(skip_serializing_if = "Option::is_none")] + max_connected: Option, + #[serde(skip_serializing_if = "Option::is_none")] mm2: Option, #[serde(default)] tx_history: bool, @@ -62,4 +66,5 @@ pub(super) struct Server { protocol: ElectrumProtocol, #[serde(default)] disable_cert_verification: bool, + pub timeout_sec: Option, } diff --git a/mm2src/coins/eth/web3_transport/http_transport.rs b/mm2src/coins/eth/web3_transport/http_transport.rs index 5d6ad98a26..196a6c4920 100644 --- a/mm2src/coins/eth/web3_transport/http_transport.rs +++ b/mm2src/coins/eth/web3_transport/http_transport.rs @@ -120,10 +120,10 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result r, Err(err) => { - return Err(request_failed_error(request, Web3RpcError::Transport(err.to_string()))); + return Err(request_failed_error(&request, Web3RpcError::Transport(err.to_string()))); }, }; @@ -160,7 +160,7 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result r, Err(err) => { return Err(request_failed_error( - request, + &request, Web3RpcError::InvalidResponse(format!("Server: '{}', error: {}", transport.node.uri, err)), )); }, @@ -195,10 +195,10 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result Ok(response_json), Err(Error::Transport(e)) => Err(request_failed_error( - request, + &request, Web3RpcError::Transport(format!("Server: '{}', error: {}", transport.node.uri, e)), )), Err(e) => Err(request_failed_error( - request, + &request, Web3RpcError::InvalidResponse(format!("Server: '{}', error: {}", transport.node.uri, e)), )), } @@ -275,7 +275,7 @@ async fn send_request_once( } } -fn request_failed_error(request: Call, error: Web3RpcError) -> Error { +fn request_failed_error(request: &Call, error: Web3RpcError) -> Error { let error = format!("request {:?} failed: {}", request, error); Error::Transport(TransportError::Message(error)) } diff --git a/mm2src/coins/eth/web3_transport/websocket_transport.rs b/mm2src/coins/eth/web3_transport/websocket_transport.rs index fd1220e92e..63f76aa32e 100644 --- a/mm2src/coins/eth/web3_transport/websocket_transport.rs +++ b/mm2src/coins/eth/web3_transport/websocket_transport.rs @@ -359,7 +359,7 @@ async fn send_request( let mut tx = transport.controller_channel.tx.lock().await; - let (notification_sender, notification_receiver) = futures::channel::oneshot::channel::>(); + let (notification_sender, notification_receiver) = oneshot::channel::>(); event_handlers.on_outgoing_request(&request_bytes); diff --git a/mm2src/coins/lightning/ln_platform.rs b/mm2src/coins/lightning/ln_platform.rs index 0a5fae0a4f..59e3e19488 100644 --- a/mm2src/coins/lightning/ln_platform.rs +++ b/mm2src/coins/lightning/ln_platform.rs @@ -1,8 +1,8 @@ use super::*; use crate::lightning::ln_errors::{SaveChannelClosingError, SaveChannelClosingResult}; -use crate::utxo::rpc_clients::{BestBlock as RpcBestBlock, BlockHashOrHeight, ConfirmedTransactionInfo, - ElectrumBlockHeader, ElectrumClient, ElectrumNonce, EstimateFeeMethod, - UtxoRpcClientEnum, UtxoRpcResult}; +use 
crate::lightning::ln_utils::RpcBestBlock; +use crate::utxo::rpc_clients::{BlockHashOrHeight, ConfirmedTransactionInfo, ElectrumBlockHeader, ElectrumClient, + ElectrumNonce, EstimateFeeMethod, UtxoRpcClientEnum, UtxoRpcResult}; use crate::utxo::spv::SimplePaymentVerification; use crate::utxo::utxo_standard::UtxoStandardCoin; use crate::utxo::GetConfirmedTxError; diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 693b7c3a4f..5b4ac5698d 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -3,7 +3,7 @@ use crate::lightning::ln_db::LightningDB; use crate::lightning::ln_platform::{get_best_header, ln_best_block_update_loop, update_best_block}; use crate::lightning::ln_sql::SqliteLightningDB; use crate::lightning::ln_storage::{LightningStorage, NodesAddressesMap}; -use crate::utxo::rpc_clients::BestBlock as RpcBestBlock; +use crate::utxo::rpc_clients::ElectrumBlockHeader; use bitcoin::hash_types::BlockHash; use bitcoin_hashes::{sha256d, Hash}; use common::executor::SpawnFuture; @@ -38,6 +38,21 @@ pub type ChainMonitor = chainmonitor::ChainMonitor< pub type ChannelManager = SimpleArcChannelManager; pub type Router = DefaultRouter, Arc, Arc>; +#[derive(Debug, PartialEq)] +pub struct RpcBestBlock { + pub height: u64, + pub hash: H256Json, +} + +impl From for RpcBestBlock { + fn from(block_header: ElectrumBlockHeader) -> Self { + RpcBestBlock { + height: block_header.block_height(), + hash: block_header.block_hash(), + } + } +} + #[inline] fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir().join("LIGHTNING").join(ticker) } diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 500963fba8..8715babd24 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4393,9 +4393,10 @@ pub enum CoinProtocol { }, } -pub type RpcTransportEventHandlerShared = Arc; - -/// Common methods to measure the outgoing requests and incoming responses statistics. +/// Common methods to handle the connection events. +/// +/// Note that the handler methods are sync and shouldn't take long time executing, otherwise it will hurt the performance. +/// If a handler needs to do some heavy work, it should be spawned/done in a separate thread. 
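
Since the trait declared just below now receives addresses as `&str` and the note above asks handlers to stay synchronous and cheap, a handler that needs to react to connection events can simply forward them through a channel and do the heavy lifting elsewhere. A minimal sketch — the `ConnectionEvent` enum and the channel wiring are illustrative, not part of this PR:

```rust
// Illustrative only: mirrors the updated `RpcTransportEventHandler` signatures
// from this diff; `ConnectionEvent` and the mpsc channel are hypothetical.
use futures::channel::mpsc::UnboundedSender;

enum ConnectionEvent {
    Connected(String),
    Disconnected(String),
}

struct ChannelForwardingHandler {
    tx: UnboundedSender<ConnectionEvent>,
}

impl RpcTransportEventHandler for ChannelForwardingHandler {
    fn debug_info(&self) -> String { "ChannelForwardingHandler".into() }

    // Metrics-style hooks: keep them cheap and non-blocking.
    fn on_outgoing_request(&self, _data: &[u8]) {}

    fn on_incoming_response(&self, _data: &[u8]) {}

    fn on_connected(&self, address: &str) -> Result<(), String> {
        // Heavy work is done by whoever reads the channel, not in the handler itself.
        self.tx
            .unbounded_send(ConnectionEvent::Connected(address.to_string()))
            .map_err(|e| e.to_string())
    }

    fn on_disconnected(&self, address: &str) -> Result<(), String> {
        self.tx
            .unbounded_send(ConnectionEvent::Disconnected(address.to_string()))
            .map_err(|e| e.to_string())
    }
}
```
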
pub trait RpcTransportEventHandler { fn debug_info(&self) -> String; @@ -4403,12 +4404,15 @@ pub trait RpcTransportEventHandler { fn on_incoming_response(&self, data: &[u8]); - fn on_connected(&self, address: String) -> Result<(), String>; + fn on_connected(&self, address: &str) -> Result<(), String>; - fn on_disconnected(&self, address: String) -> Result<(), String>; + fn on_disconnected(&self, address: &str) -> Result<(), String>; } -impl fmt::Debug for dyn RpcTransportEventHandler + Send + Sync { +pub type SharableRpcTransportEventHandler = dyn RpcTransportEventHandler + Send + Sync; +pub type RpcTransportEventHandlerShared = Arc; + +impl fmt::Debug for SharableRpcTransportEventHandler { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.debug_info()) } } @@ -4419,9 +4423,21 @@ impl RpcTransportEventHandler for RpcTransportEventHandlerShared { fn on_incoming_response(&self, data: &[u8]) { self.as_ref().on_incoming_response(data) } - fn on_connected(&self, address: String) -> Result<(), String> { self.as_ref().on_connected(address) } + fn on_connected(&self, address: &str) -> Result<(), String> { self.as_ref().on_connected(address) } - fn on_disconnected(&self, address: String) -> Result<(), String> { self.as_ref().on_disconnected(address) } + fn on_disconnected(&self, address: &str) -> Result<(), String> { self.as_ref().on_disconnected(address) } +} + +impl RpcTransportEventHandler for Box { + fn debug_info(&self) -> String { self.as_ref().debug_info() } + + fn on_outgoing_request(&self, data: &[u8]) { self.as_ref().on_outgoing_request(data) } + + fn on_incoming_response(&self, data: &[u8]) { self.as_ref().on_incoming_response(data) } + + fn on_connected(&self, address: &str) -> Result<(), String> { self.as_ref().on_connected(address) } + + fn on_disconnected(&self, address: &str) -> Result<(), String> { self.as_ref().on_disconnected(address) } } impl RpcTransportEventHandler for Vec { @@ -4442,16 +4458,28 @@ impl RpcTransportEventHandler for Vec { } } - fn on_connected(&self, address: String) -> Result<(), String> { + fn on_connected(&self, address: &str) -> Result<(), String> { + let mut errors = vec![]; for handler in self { - try_s!(handler.on_connected(address.clone())) + if let Err(e) = handler.on_connected(address) { + errors.push((handler.debug_info(), e)) + } + } + if !errors.is_empty() { + return Err(format!("Errors: {:?}", errors)); } Ok(()) } - fn on_disconnected(&self, address: String) -> Result<(), String> { + fn on_disconnected(&self, address: &str) -> Result<(), String> { + let mut errors = vec![]; for handler in self { - try_s!(handler.on_disconnected(address.clone())) + if let Err(e) = handler.on_disconnected(address) { + errors.push((handler.debug_info(), e)) + } + } + if !errors.is_empty() { + return Err(format!("Errors: {:?}", errors)); } Ok(()) } @@ -4512,17 +4540,9 @@ impl RpcTransportEventHandler for CoinTransportMetrics { "coin" => self.ticker.to_owned(), "client" => self.client.to_owned()); } - fn on_connected(&self, _address: String) -> Result<(), String> { - // Handle a new connected endpoint if necessary. - // Now just return the Ok - Ok(()) - } + fn on_connected(&self, _address: &str) -> Result<(), String> { Ok(()) } - fn on_disconnected(&self, _address: String) -> Result<(), String> { - // Handle disconnected endpoint if necessary. 
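
With the impls added above for boxed handlers and for vectors of shared handlers, several handlers can be driven as one, and per this diff a failing handler no longer short-circuits the rest: all failures are collected into a single `Errors: [...]` string. A hedged usage sketch that reuses the illustrative handler from the previous snippet:

```rust
// Sketch only: assumes this crate's RpcTransportEventHandler trait, the
// RpcTransportEventHandlerShared alias, and the ChannelForwardingHandler
// from the previous snippet are in scope; the address is a placeholder.
use std::sync::Arc;
use futures::channel::mpsc::unbounded;

fn notify_connected(address: &str) {
    let (tx, _rx) = unbounded();
    let handlers: Vec<RpcTransportEventHandlerShared> = vec![Arc::new(ChannelForwardingHandler { tx })];

    // Every handler is invoked; failures are aggregated rather than returned one by one.
    if let Err(all_errors) = handlers.on_connected(address) {
        log::warn!("some connection handlers failed: {}", all_errors);
    }
}
```
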
- // Now just return the Ok - Ok(()) - } + fn on_disconnected(&self, _address: &str) -> Result<(), String> { Ok(()) } } #[async_trait] diff --git a/mm2src/coins/utxo.rs b/mm2src/coins/utxo.rs index 328863865c..6d98451c7f 100644 --- a/mm2src/coins/utxo.rs +++ b/mm2src/coins/utxo.rs @@ -99,16 +99,15 @@ use utxo_hd_wallet::UtxoHDWallet; use utxo_signer::with_key_pair::sign_tx; use utxo_signer::{TxProvider, TxProviderError, UtxoSignTxError, UtxoSignTxResult}; -use self::rpc_clients::{electrum_script_hash, ElectrumClient, ElectrumRpcRequest, EstimateFeeMethod, EstimateFeeMode, - NativeClient, UnspentInfo, UnspentMap, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, - UtxoRpcResult}; +use self::rpc_clients::{electrum_script_hash, ElectrumClient, ElectrumConnectionSettings, EstimateFeeMethod, + EstimateFeeMode, NativeClient, UnspentInfo, UnspentMap, UtxoRpcClientEnum, UtxoRpcError, + UtxoRpcFut, UtxoRpcResult}; use super::{big_decimal_from_sat_unsigned, BalanceError, BalanceFut, BalanceResult, CoinBalance, CoinFutSpawner, CoinsContext, DerivationMethod, FeeApproxStage, FoundSwapTxSpend, HistorySyncState, KmdRewardsDetails, MarketCoinOps, MmCoin, NumConversError, NumConversResult, PrivKeyActivationPolicy, PrivKeyPolicy, - PrivKeyPolicyNotAllowed, RawTransactionFut, RpcTransportEventHandler, RpcTransportEventHandlerShared, - TradeFee, TradePreimageError, TradePreimageFut, TradePreimageResult, Transaction, TransactionDetails, - TransactionEnum, TransactionErr, UnexpectedDerivationMethod, VerificationError, WithdrawError, - WithdrawRequest}; + PrivKeyPolicyNotAllowed, RawTransactionFut, TradeFee, TradePreimageError, TradePreimageFut, + TradePreimageResult, Transaction, TransactionDetails, TransactionEnum, TransactionErr, + UnexpectedDerivationMethod, VerificationError, WithdrawError, WithdrawRequest}; use crate::coin_balance::{EnableCoinScanPolicy, EnabledCoinBalanceParams, HDAddressBalanceScanner}; use crate::hd_wallet::{HDAccountOps, HDAddressOps, HDPathAccountToAddressId, HDWalletCoinOps, HDWalletOps}; use crate::utxo::tx_cache::UtxoVerboseCacheShared; @@ -143,7 +142,6 @@ pub type RecentlySpentOutPointsGuard<'a> = AsyncMutexGuard<'a, RecentlySpentOutP pub enum ScripthashNotification { Triggered(String), SubscribeToAddresses(HashSet
), - RefreshSubscriptions, } pub type ScripthashNotificationSender = Option>; @@ -1386,43 +1384,6 @@ pub fn coin_daemon_data_dir(name: &str, is_asset_chain: bool) -> PathBuf { data_dir } -enum ElectrumProtoVerifierEvent { - Connected(String), - Disconnected(String), -} - -/// Electrum protocol version verifier. -/// The structure is used to handle the `on_connected` event and notify `electrum_version_loop`. -struct ElectrumProtoVerifier { - on_event_tx: UnboundedSender, -} - -impl ElectrumProtoVerifier { - fn into_shared(self) -> RpcTransportEventHandlerShared { Arc::new(self) } -} - -impl RpcTransportEventHandler for ElectrumProtoVerifier { - fn debug_info(&self) -> String { "ElectrumProtoVerifier".into() } - - fn on_outgoing_request(&self, _data: &[u8]) {} - - fn on_incoming_response(&self, _data: &[u8]) {} - - fn on_connected(&self, address: String) -> Result<(), String> { - try_s!(self - .on_event_tx - .unbounded_send(ElectrumProtoVerifierEvent::Connected(address))); - Ok(()) - } - - fn on_disconnected(&self, address: String) -> Result<(), String> { - try_s!(self - .on_event_tx - .unbounded_send(ElectrumProtoVerifierEvent::Disconnected(address))); - Ok(()) - } -} - #[derive(Clone, Debug, Deserialize, Serialize)] pub struct UtxoActivationParams { pub mode: UtxoRpcMode, @@ -1472,7 +1433,13 @@ impl UtxoActivationParams { Some("electrum") => { let servers = json::from_value(req["servers"].clone()).map_to_mm(UtxoFromLegacyReqErr::InvalidElectrumServers)?; - UtxoRpcMode::Electrum { servers } + let min_connected = req["min_connected"].as_u64().map(|m| m as usize); + let max_connected = req["max_connected"].as_u64().map(|m| m as usize); + UtxoRpcMode::Electrum { + servers, + min_connected, + max_connected, + } }, _ => return MmError::err(UtxoFromLegacyReqErr::UnexpectedMethod), }; @@ -1524,7 +1491,14 @@ impl UtxoActivationParams { #[serde(tag = "rpc", content = "rpc_data")] pub enum UtxoRpcMode { Native, - Electrum { servers: Vec }, + Electrum { + /// The settings of each electrum server. + servers: Vec, + /// The minimum number of connections to electrum servers to keep alive/maintained at all times. + min_connected: Option, + /// The maximum number of connections to electrum servers to not exceed at any time. 
+ max_connected: Option, + }, } impl UtxoRpcMode { diff --git a/mm2src/coins/utxo/rpc_clients.rs b/mm2src/coins/utxo/rpc_clients.rs index f5150e4a9a..fce6afa82a 100644 --- a/mm2src/coins/utxo/rpc_clients.rs +++ b/mm2src/coins/utxo/rpc_clients.rs @@ -1,78 +1,51 @@ #![cfg_attr(target_arch = "wasm32", allow(unused_macros))] #![cfg_attr(target_arch = "wasm32", allow(dead_code))] -use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; -use crate::utxo::{output_script, output_script_p2pk, sat_from_big_decimal, GetBlockHeaderError, GetConfirmedTxError, - GetTxError, GetTxHeightError, NumConversResult, ScripthashNotification}; -use crate::{big_decimal_from_sat_unsigned, MyAddressError, NumConversError, RpcTransportEventHandler, - RpcTransportEventHandlerShared}; -use async_trait::async_trait; -use chain::{BlockHeader, BlockHeaderBits, BlockHeaderNonce, OutPoint, Transaction as UtxoTx, TransactionInput, - TxHashAlgo}; -use common::custom_futures::{select_ok_sequential, timeout::FutureTimerExt}; +mod electrum_rpc; +pub use electrum_rpc::*; + +use crate::utxo::{sat_from_big_decimal, GetBlockHeaderError, GetTxError, NumConversError, NumConversResult}; +use crate::{big_decimal_from_sat_unsigned, MyAddressError, RpcTransportEventHandlerShared}; +use chain::{OutPoint, Transaction as UtxoTx, TransactionInput, TxHashAlgo}; use common::custom_iter::TryIntoGroupMap; -use common::executor::{abortable_queue, abortable_queue::AbortableQueue, AbortableSystem, SpawnFuture, Timer}; -use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcBatchResponse, JsonRpcClient, JsonRpcError, JsonRpcErrorType, - JsonRpcId, JsonRpcMultiClient, JsonRpcRemoteAddr, JsonRpcRequest, JsonRpcRequestEnum, - JsonRpcResponse, JsonRpcResponseEnum, JsonRpcResponseFut, RpcRes}; -use common::log::{debug, LogOnError}; +use common::executor::Timer; +use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcClient, JsonRpcError, JsonRpcErrorType, JsonRpcRequest, + JsonRpcRequestEnum, JsonRpcResponseFut, RpcRes}; use common::log::{error, info, warn}; -use common::{median, now_float, now_ms, now_sec, OrdRange}; -use derive_more::Display; +use common::{median, now_sec}; use enum_derives::EnumFromStringify; -use futures::channel::oneshot as async_oneshot; -use futures::compat::{Future01CompatExt, Stream01CompatExt}; -use futures::future::{join_all, FutureExt, TryFutureExt}; -use futures::lock::Mutex as AsyncMutex; -use futures::{select, StreamExt}; -use futures01::future::select_ok; -use futures01::sync::mpsc; -use futures01::{Future, Sink, Stream}; -use http::Uri; -use itertools::Itertools; use keys::hash::H256; use keys::Address; use mm2_err_handle::prelude::*; -use mm2_number::{BigDecimal, BigInt, MmNumber}; -use mm2_rpc::data::legacy::ElectrumProtocol; -#[cfg(test)] use mocktopus::macros::*; +use mm2_number::{BigDecimal, MmNumber}; use rpc::v1::types::{Bytes as BytesJson, Transaction as RpcTransaction, H256 as H256Json}; use script::Script; -use serde_json::{self as json, Value as Json}; -use serialization::{deserialize, serialize, serialize_with_flags, CoinVariant, CompactInteger, Reader, - SERIALIZE_TRANSACTION_WITNESS}; -use sha2::{Digest, Sha256}; -use spv_validation::helpers_validation::SPVError; -use spv_validation::storage::BlockHeaderStorageOps; -use std::collections::hash_map::Entry; +use serialization::{deserialize, serialize, serialize_with_flags, CoinVariant, SERIALIZE_TRANSACTION_WITNESS}; + use std::collections::HashMap; -use std::convert::TryInto; use std::fmt; -use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; 
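
The legacy `electrum` activation request can now carry the two connection limits parsed above (`min_connected` / `max_connected`). A rough example of a request body this parsing would accept — the coin, server URLs, and limit values are placeholders, not defaults introduced by this PR:

```rust
// Hypothetical legacy electrum activation body; only the "min_connected" and
// "max_connected" keys are new in this diff, everything else is illustrative.
fn example_legacy_electrum_request() -> serde_json::Value {
    serde_json::json!({
        "method": "electrum",
        "coin": "RICK",
        "servers": [
            { "url": "electrum1.example.com:10017" },
            { "url": "electrum2.example.com:10017", "protocol": "SSL" }
        ],
        // Keep at least one connection alive, but never more than three.
        "min_connected": 1,
        "max_connected": 3
    })
}
```
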
+use std::fmt::Debug; use std::num::NonZeroU64; use std::ops::Deref; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::Arc; -use std::time::Duration; -use super::ScripthashNotificationSender; +use async_trait::async_trait; +use derive_more::Display; +use futures::channel::oneshot as async_oneshot; +use futures::compat::Future01CompatExt; +use futures::future::{FutureExt, TryFutureExt}; +use futures::lock::Mutex as AsyncMutex; +use futures01::Future; +#[cfg(test)] use mocktopus::macros::*; +use serde_json::{self as json, Value as Json}; cfg_native! { - use futures::future::Either; - use futures::io::Error; + use crate::RpcTransportEventHandler; + use common::jsonrpc_client::{JsonRpcRemoteAddr, JsonRpcResponseEnum}; + use http::header::AUTHORIZATION; use http::{Request, StatusCode}; - use rustls::client::ServerCertVerified; - use rustls::{Certificate, ClientConfig, ServerName, OwnedTrustAnchor, RootCertStore}; - use std::convert::TryFrom; - use std::pin::Pin; - use std::task::{Context, Poll}; - use std::time::SystemTime; - use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, ReadBuf}; - use tokio::net::TcpStream; - use tokio_rustls::{client::TlsStream, TlsConnector}; - use webpki_roots::TLS_SERVER_ROOTS; } pub const NO_TX_ERROR_CODE: &str = "'code': -5"; @@ -80,39 +53,15 @@ const RESPONSE_TOO_LARGE_CODE: i16 = -32600; const TX_NOT_FOUND_RETRIES: u8 = 10; pub type AddressesByLabelResult = HashMap; -pub type JsonRpcPendingRequestsShared = Arc>; -pub type JsonRpcPendingRequests = HashMap>; pub type UnspentMap = HashMap>; -type ElectrumTxHistory = Vec; -type ElectrumScriptHash = String; -type ScriptHashUnspents = Vec; - #[derive(Debug, Deserialize)] #[allow(dead_code)] pub struct AddressPurpose { purpose: String, } -/// Skips the server certificate verification on TLS connection -pub struct NoCertificateVerification {} - -#[cfg(not(target_arch = "wasm32"))] -impl rustls::client::ServerCertVerifier for NoCertificateVerification { - fn verify_server_cert( - &self, - _: &Certificate, - _: &[Certificate], - _: &ServerName, - _: &mut dyn Iterator, - _: &[u8], - _: SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) - } -} - -#[derive(Debug)] +#[derive(Clone, Debug)] pub enum UtxoRpcClientEnum { Native(NativeClient), Electrum(ElectrumClient), @@ -145,15 +94,6 @@ impl Deref for UtxoRpcClientEnum { } } -impl Clone for UtxoRpcClientEnum { - fn clone(&self) -> Self { - match self { - UtxoRpcClientEnum::Native(c) => UtxoRpcClientEnum::Native(c.clone()), - UtxoRpcClientEnum::Electrum(c) => UtxoRpcClientEnum::Electrum(c.clone()), - } - } -} - impl UtxoRpcClientEnum { pub fn wait_for_confirmations( &self, @@ -306,6 +246,14 @@ pub struct SpentOutputInfo { pub spent_in_block: BlockHashOrHeight, } +#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Deserialize, Serialize)] +pub enum EstimateFeeMode { + ECONOMICAL, + CONSERVATIVE, + UNSET, +} + pub type UtxoRpcResult = Result>; pub type UtxoRpcFut = Box> + Send + 'static>; @@ -381,7 +329,8 @@ pub trait UtxoRpcClientOps: fmt::Debug + Send + Sync + 'static { /// Submits the raw `tx` transaction (serialized, hex-encoded) to blockchain network. fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut; - fn blockchain_scripthash_subscribe(&self, scripthash: String) -> UtxoRpcFut; + /// Subscribe to scripthash notifications from `server_address` for the given `scripthash`. 
+ fn blockchain_scripthash_subscribe_using(&self, server_address: &str, scripthash: String) -> UtxoRpcFut; /// Returns raw transaction (serialized, hex-encoded) by the given `txid`. fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut; @@ -656,6 +605,66 @@ pub struct ListUnspentArgs { addresses: Vec, } +#[derive(Debug)] +struct ConcurrentRequestState { + is_running: bool, + subscribers: Vec>, +} + +impl ConcurrentRequestState { + fn new() -> Self { + ConcurrentRequestState { + is_running: false, + subscribers: Vec::new(), + } + } +} + +#[derive(Debug)] +pub struct ConcurrentRequestMap { + inner: AsyncMutex>>, +} + +impl Default for ConcurrentRequestMap { + fn default() -> Self { + ConcurrentRequestMap { + inner: AsyncMutex::new(HashMap::new()), + } + } +} + +impl ConcurrentRequestMap { + pub fn new() -> ConcurrentRequestMap { ConcurrentRequestMap::default() } + + async fn wrap_request(&self, request_arg: K, request_fut: RpcRes) -> Result { + let mut map = self.inner.lock().await; + let state = map + .entry(request_arg.clone()) + .or_insert_with(ConcurrentRequestState::new); + if state.is_running { + let (tx, rx) = async_oneshot::channel(); + state.subscribers.push(tx); + // drop here to avoid holding the lock during await + drop(map); + rx.await.unwrap() + } else { + state.is_running = true; + // drop here to avoid holding the lock during await + drop(map); + let request_res = request_fut.compat().await; + let mut map = self.inner.lock().await; + let state = map.get_mut(&request_arg).unwrap(); + for sub in state.subscribers.drain(..) { + if sub.send(request_res.clone()).is_err() { + warn!("subscriber is dropped"); + } + } + state.is_running = false; + request_res + } + } +} + /// RPC client for UTXO based coins /// https://developer.bitcoin.org/reference/rpc/index.html - Bitcoin RPC API reference /// Other coins have additional methods or miss some of these @@ -711,7 +720,7 @@ impl UtxoJsonRpcClientInfo for NativeClientImpl { impl JsonRpcClient for NativeClientImpl { fn version(&self) -> &'static str { "1.0" } - fn next_id(&self) -> String { self.request_id.fetch_add(1, AtomicOrdering::Relaxed).to_string() } + fn next_id(&self) -> u64 { self.request_id.fetch_add(1, AtomicOrdering::Relaxed) } fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } @@ -732,10 +741,10 @@ impl JsonRpcClient for NativeClientImpl { self.event_handlers.on_outgoing_request(request_body.as_bytes()); let uri = self.uri.clone(); - + let auth = self.auth.clone(); let http_request = try_f!(Request::builder() .method("POST") - .header(AUTHORIZATION, self.auth.clone()) + .header(AUTHORIZATION, auth) .uri(uri.clone()) .body(Vec::from(request_body)) .map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))); @@ -828,7 +837,7 @@ impl UtxoRpcClientOps for NativeClient { Box::new(rpc_func!(self, "sendrawtransaction", tx).map_to_mm_fut(UtxoRpcError::from)) } - fn blockchain_scripthash_subscribe(&self, _scripthash: String) -> UtxoRpcFut { + fn blockchain_scripthash_subscribe_using(&self, _: &str, _scripthash: String) -> UtxoRpcFut { Box::new(futures01::future::err( UtxoRpcError::Internal("blockchain_scripthash_subscribe` is not supported for Native Clients".to_owned()) .into(), @@ -1225,1858 +1234,6 @@ impl NativeClientImpl { } } -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumUnspent { - pub height: Option, - pub tx_hash: H256Json, - pub tx_pos: u32, - pub value: u64, -} - -#[derive(Clone, Debug, Deserialize)] -#[serde(untagged)] -pub enum ElectrumNonce { - Number(u64), - 
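
`blockchain_scripthash_subscribe_using` above ties a scripthash subscription to one specific electrum server instead of whichever connection answers first. A hedged caller-side sketch, assuming this module's `ElectrumClient` and the `UtxoRpcClientOps` trait are in scope; the server address is a placeholder:

```rust
// Sketch only: simplified error handling; `server_addr` is a placeholder.
use futures::compat::Future01CompatExt;

async fn subscribe_to_script(
    client: &ElectrumClient,
    script_pubkey: &[u8],
    server_addr: &str,
) -> Result<(), String> {
    // Electrum identifies scripts by the reversed SHA-256 of the script pubkey, hex-encoded.
    let scripthash = hex::encode(electrum_script_hash(script_pubkey));
    client
        .blockchain_scripthash_subscribe_using(server_addr, scripthash)
        .compat()
        .await
        .map(|_status| ())
        .map_err(|e| e.to_string())
}
```
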
Hash(H256Json), -} - -#[allow(clippy::from_over_into)] -impl Into for ElectrumNonce { - fn into(self) -> BlockHeaderNonce { - match self { - ElectrumNonce::Number(n) => BlockHeaderNonce::U32(n as u32), - ElectrumNonce::Hash(h) => BlockHeaderNonce::H256(h.into()), - } - } -} - -#[derive(Debug, Deserialize)] -pub struct ElectrumBlockHeadersRes { - pub count: u64, - pub hex: BytesJson, - #[allow(dead_code)] - max: u64, -} - -/// The block header compatible with Electrum 1.2 -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBlockHeaderV12 { - pub bits: u64, - pub block_height: u64, - pub merkle_root: H256Json, - pub nonce: ElectrumNonce, - pub prev_block_hash: H256Json, - pub timestamp: u64, - pub version: u64, -} - -impl ElectrumBlockHeaderV12 { - fn as_block_header(&self) -> BlockHeader { - BlockHeader { - version: self.version as u32, - previous_header_hash: self.prev_block_hash.into(), - merkle_root_hash: self.merkle_root.into(), - claim_trie_root: None, - hash_final_sapling_root: None, - time: self.timestamp as u32, - bits: BlockHeaderBits::U32(self.bits as u32), - nonce: self.nonce.clone().into(), - solution: None, - aux_pow: None, - prog_pow: None, - mtp_pow: None, - is_verus: false, - hash_state_root: None, - hash_utxo_root: None, - prevout_stake: None, - vch_block_sig_dlgt: None, - n_height: None, - n_nonce_u64: None, - mix_hash: None, - } - } - - #[inline] - pub fn as_hex(&self) -> String { - let block_header = self.as_block_header(); - let serialized = serialize(&block_header); - hex::encode(serialized) - } - - #[inline] - pub fn hash(&self) -> H256Json { - let block_header = self.as_block_header(); - BlockHeader::hash(&block_header).into() - } -} - -/// The block header compatible with Electrum 1.4 -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBlockHeaderV14 { - pub height: u64, - pub hex: BytesJson, -} - -impl ElectrumBlockHeaderV14 { - pub fn hash(&self) -> H256Json { self.hex.clone().into_vec()[..].into() } -} - -#[derive(Clone, Debug, Deserialize)] -#[serde(untagged)] -pub enum ElectrumBlockHeader { - V12(ElectrumBlockHeaderV12), - V14(ElectrumBlockHeaderV14), -} - -/// The merkle branch of a confirmed transaction -#[derive(Clone, Debug, Deserialize)] -pub struct TxMerkleBranch { - pub merkle: Vec, - pub block_height: u64, - pub pos: usize, -} - -#[derive(Clone)] -pub struct ConfirmedTransactionInfo { - pub tx: UtxoTx, - pub header: BlockHeader, - pub index: u64, - pub height: u64, -} - -#[derive(Debug, PartialEq)] -pub struct BestBlock { - pub height: u64, - pub hash: H256Json, -} - -impl From for BestBlock { - fn from(block_header: ElectrumBlockHeader) -> Self { - BestBlock { - height: block_header.block_height(), - hash: block_header.block_hash(), - } - } -} - -#[allow(clippy::upper_case_acronyms)] -#[derive(Debug, Deserialize, Serialize)] -pub enum EstimateFeeMode { - ECONOMICAL, - CONSERVATIVE, - UNSET, -} - -impl ElectrumBlockHeader { - pub fn block_height(&self) -> u64 { - match self { - ElectrumBlockHeader::V12(h) => h.block_height, - ElectrumBlockHeader::V14(h) => h.height, - } - } - - fn block_hash(&self) -> H256Json { - match self { - ElectrumBlockHeader::V12(h) => h.hash(), - ElectrumBlockHeader::V14(h) => h.hash(), - } - } -} - -#[derive(Debug, Deserialize)] -pub struct ElectrumTxHistoryItem { - pub height: i64, - pub tx_hash: H256Json, - pub fee: Option, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBalance { - pub(crate) confirmed: i128, - pub(crate) unconfirmed: i128, -} - -impl ElectrumBalance { - #[inline] - pub fn 
to_big_decimal(&self, decimals: u8) -> BigDecimal { - let balance_sat = BigInt::from(self.confirmed) + BigInt::from(self.unconfirmed); - BigDecimal::from(balance_sat) / BigDecimal::from(10u64.pow(decimals as u32)) - } -} - -#[inline] -fn sha_256(input: &[u8]) -> Vec { - let mut sha = Sha256::new(); - sha.update(input); - sha.finalize().to_vec() -} - -#[inline] -pub fn electrum_script_hash(script: &[u8]) -> Vec { - let mut result = sha_256(script); - result.reverse(); - result -} - -#[derive(Debug, Deserialize, Serialize)] -/// Deserializable Electrum protocol version representation for RPC -/// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#server.version -pub struct ElectrumProtocolVersion { - pub server_software_version: String, - pub protocol_version: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -/// Electrum request RPC representation -pub struct ElectrumRpcRequest { - pub url: String, - #[serde(default)] - pub protocol: ElectrumProtocol, - #[serde(default)] - pub disable_cert_verification: bool, -} - -/// Electrum client configuration -#[allow(clippy::upper_case_acronyms)] -#[cfg(not(target_arch = "wasm32"))] -#[derive(Clone, Debug, Serialize)] -enum ElectrumConfig { - TCP, - SSL { dns_name: String, skip_validation: bool }, -} - -/// Electrum client configuration -#[allow(clippy::upper_case_acronyms)] -#[cfg(target_arch = "wasm32")] -#[derive(Clone, Debug, Serialize)] -enum ElectrumConfig { - WS, - WSS, -} - -fn addr_to_socket_addr(input: &str) -> Result { - let mut addr = match input.to_socket_addrs() { - Ok(a) => a, - Err(e) => return ERR!("{} resolve error {:?}", input, e), - }; - match addr.next() { - Some(a) => Ok(a), - None => ERR!("{} resolved to None.", input), - } -} - -#[cfg(not(target_arch = "wasm32"))] -fn server_name_from_domain(dns_name: &str) -> Result { - match ServerName::try_from(dns_name) { - Ok(dns_name) if matches!(dns_name, ServerName::DnsName(_)) => Ok(dns_name), - _ => ERR!("Couldn't parse DNS name from '{}'", dns_name), - } -} - -/// Attempts to process the request (parse url, etc), build up the config and create new electrum connection -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. -#[cfg(not(target_arch = "wasm32"))] -pub fn spawn_electrum( - req: &ElectrumRpcRequest, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> Result { - let config = match req.protocol { - ElectrumProtocol::TCP => ElectrumConfig::TCP, - ElectrumProtocol::SSL => { - let uri: Uri = try_s!(req.url.parse()); - let host = uri - .host() - .ok_or(ERRL!("Couldn't retrieve host from addr {}", req.url))?; - - try_s!(server_name_from_domain(host)); - - ElectrumConfig::SSL { - dns_name: host.into(), - skip_validation: req.disable_cert_verification, - } - }, - ElectrumProtocol::WS | ElectrumProtocol::WSS => { - return ERR!("'ws' and 'wss' protocols are not supported yet. Consider using 'TCP' or 'SSL'") - }, - }; - - Ok(electrum_connect( - req.url.clone(), - config, - event_handlers, - scripthash_notification_sender, - abortable_system, - )) -} - -/// Attempts to process the request (parse url, etc), build up the config and create new electrum connection -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. 
-#[cfg(target_arch = "wasm32")] -pub fn spawn_electrum( - req: &ElectrumRpcRequest, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> Result { - let mut url = req.url.clone(); - let uri: Uri = try_s!(req.url.parse()); - - if uri.scheme().is_some() { - return ERR!( - "There has not to be a scheme in the url: {}. \ - 'ws://' scheme is used by default. \ - Consider using 'protocol: \"WSS\"' in the electrum request to switch to the 'wss://' scheme.", - url - ); - } - - let config = match req.protocol { - ElectrumProtocol::WS => { - url.insert_str(0, "ws://"); - ElectrumConfig::WS - }, - ElectrumProtocol::WSS => { - url.insert_str(0, "wss://"); - ElectrumConfig::WSS - }, - ElectrumProtocol::TCP | ElectrumProtocol::SSL => { - return ERR!("'TCP' and 'SSL' are not supported in a browser. Please use 'WS' or 'WSS' protocols"); - }, - }; - - Ok(electrum_connect( - url, - config, - event_handlers, - scripthash_notification_sender, - abortable_system, - )) -} - -/// Represents the active Electrum connection to selected address -pub struct ElectrumConnection { - /// The client connected to this SocketAddr - addr: String, - /// Configuration - #[allow(dead_code)] - config: ElectrumConfig, - /// The Sender forwarding requests to writing part of underlying stream - tx: Arc>>>>, - /// Responses are stored here - responses: JsonRpcPendingRequestsShared, - /// Selected protocol version. The value is initialized after the server.version RPC call. - protocol_version: AsyncMutex>, - /// This spawner is used to spawn Electrum's related futures that should be aborted on coin deactivation. - /// and on [`MmArc::stop`]. - /// This field is not used directly, but it holds all abort handles of futures spawned at `electrum_connect`. - /// - /// Please also note that this abortable system is a subsystem of [`ElectrumClientImpl::abortable_system`]. - /// For more info see [`ElectrumClientImpl::add_server`]. 
- _abortable_system: AbortableQueue, -} - -impl ElectrumConnection { - async fn is_connected(&self) -> bool { self.tx.lock().await.is_some() } - - async fn set_protocol_version(&self, version: f32) { self.protocol_version.lock().await.replace(version); } - - async fn reset_protocol_version(&self) { *self.protocol_version.lock().await = None; } -} - -#[derive(Debug)] -struct ConcurrentRequestState { - is_running: bool, - subscribers: Vec>, -} - -impl ConcurrentRequestState { - fn new() -> Self { - ConcurrentRequestState { - is_running: false, - subscribers: Vec::new(), - } - } -} - -#[derive(Debug)] -pub struct ConcurrentRequestMap { - inner: AsyncMutex>>, -} - -impl Default for ConcurrentRequestMap { - fn default() -> Self { - ConcurrentRequestMap { - inner: AsyncMutex::new(HashMap::new()), - } - } -} - -impl ConcurrentRequestMap { - pub fn new() -> ConcurrentRequestMap { ConcurrentRequestMap::default() } - - async fn wrap_request(&self, request_arg: K, request_fut: RpcRes) -> Result { - let mut map = self.inner.lock().await; - let state = map - .entry(request_arg.clone()) - .or_insert_with(ConcurrentRequestState::new); - if state.is_running { - let (tx, rx) = async_oneshot::channel(); - state.subscribers.push(tx); - // drop here to avoid holding the lock during await - drop(map); - rx.await.unwrap() - } else { - // drop here to avoid holding the lock during await - drop(map); - let request_res = request_fut.compat().await; - let mut map = self.inner.lock().await; - let state = map.get_mut(&request_arg).unwrap(); - for sub in state.subscribers.drain(..) { - if sub.send(request_res.clone()).is_err() { - warn!("subscriber is dropped"); - } - } - state.is_running = false; - request_res - } - } -} - -#[derive(Debug)] -pub struct ElectrumClientImpl { - coin_ticker: String, - connections: AsyncMutex>, - next_id: AtomicU64, - event_handlers: Vec, - protocol_version: OrdRange, - get_balance_concurrent_map: ConcurrentRequestMap, - list_unspent_concurrent_map: ConcurrentRequestMap>, - block_headers_storage: BlockHeaderStorage, - /// This spawner is used to spawn Electrum's related futures that should be aborted on coin deactivation, - /// and on [`MmArc::stop`]. - /// - /// Please also note that this abortable system is a subsystem of [`UtxoCoinFields::abortable_system`]. - abortable_system: AbortableQueue, - negotiate_version: bool, - /// This is used for balance event streaming implementation for UTXOs. - /// If balance event streaming isn't enabled, this value will always be `None`; otherwise, - /// it will be used for sending scripthash messages to trigger re-connections, re-fetching the balances, etc. 
- pub(crate) scripthash_notification_sender: ScripthashNotificationSender, -} - -async fn electrum_request_multi( - client: ElectrumClient, - request: JsonRpcRequestEnum, -) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { - let mut futures = vec![]; - let connections = client.connections.lock().await; - for (i, connection) in connections.iter().enumerate() { - if client.negotiate_version && connection.protocol_version.lock().await.is_none() { - continue; - } - - let connection_addr = connection.addr.clone(); - let json = json::to_string(&request).map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))?; - if let Some(tx) = &*connection.tx.lock().await { - let fut = electrum_request( - json, - request.rpc_id(), - tx.clone(), - connection.responses.clone(), - ELECTRUM_TIMEOUT / (connections.len() - i) as u64, - ) - .map(|response| (JsonRpcRemoteAddr(connection_addr), response)); - futures.push(fut) - } - } - drop(connections); - - if futures.is_empty() { - return Err(JsonRpcErrorType::Transport( - "All electrums are currently disconnected".to_string(), - )); - } - - if let JsonRpcRequestEnum::Single(single) = &request { - if single.method == "server.ping" { - // server.ping must be sent to all servers to keep all connections alive - return select_ok(futures).map(|(result, _)| result).compat().await; - } - } - - let (res, no_of_failed_requests) = select_ok_sequential(futures) - .compat() - .await - .map_err(|e| JsonRpcErrorType::Transport(format!("{:?}", e)))?; - client.rotate_servers(no_of_failed_requests).await; - - Ok(res) -} - -async fn electrum_request_to( - client: ElectrumClient, - request: JsonRpcRequestEnum, - to_addr: String, -) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { - let (tx, responses) = { - let connections = client.connections.lock().await; - let connection = connections - .iter() - .find(|c| c.addr == to_addr) - .ok_or_else(|| JsonRpcErrorType::Internal(format!("Unknown destination address {}", to_addr)))?; - let responses = connection.responses.clone(); - let tx = { - match &*connection.tx.lock().await { - Some(tx) => tx.clone(), - None => { - return Err(JsonRpcErrorType::Transport(format!( - "Connection {} is not established yet", - to_addr - ))) - }, - } - }; - (tx, responses) - }; - let json = json::to_string(&request).map_err(|err| JsonRpcErrorType::InvalidRequest(err.to_string()))?; - let response = electrum_request(json, request.rpc_id(), tx, responses, ELECTRUM_TIMEOUT) - .compat() - .await?; - Ok((JsonRpcRemoteAddr(to_addr.to_owned()), response)) -} - -impl ElectrumClientImpl { - pub fn spawner(&self) -> abortable_queue::WeakSpawner { self.abortable_system.weak_spawner() } - - /// Create an Electrum connection and spawn a green thread actor to handle it. - pub async fn add_server(&self, req: &ElectrumRpcRequest) -> Result<(), String> { - let subsystem = try_s!(self.abortable_system.create_subsystem()); - let connection = try_s!(spawn_electrum( - req, - self.event_handlers.clone(), - &self.scripthash_notification_sender, - subsystem, - )); - self.connections.lock().await.push(connection); - Ok(()) - } - - /// Remove an Electrum connection and stop corresponding spawned actor. 
- pub async fn remove_server(&self, server_addr: &str) -> Result<(), String> { - let mut connections = self.connections.lock().await; - // do not use retain, we would have to return an error if we did not find connection by the passd address - let pos = connections - .iter() - .position(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - // shutdown_tx will be closed immediately on the connection drop - connections.remove(pos); - Ok(()) - } - - /// Moves the Electrum servers that fail in a multi request to the end. - pub async fn rotate_servers(&self, no_of_rotations: usize) { - let mut connections = self.connections.lock().await; - connections.rotate_left(no_of_rotations); - } - - /// Check if one of the spawned connections is connected. - pub async fn is_connected(&self) -> bool { - for connection in self.connections.lock().await.iter() { - if connection.is_connected().await { - return true; - } - } - false - } - - /// Check if all connections have been removed. - pub async fn is_connections_pool_empty(&self) -> bool { self.connections.lock().await.is_empty() } - - pub async fn count_connections(&self) -> usize { self.connections.lock().await.len() } - - /// Check if the protocol version was checked for one of the spawned connections. - pub async fn is_protocol_version_checked(&self) -> bool { - for connection in self.connections.lock().await.iter() { - if connection.protocol_version.lock().await.is_some() { - return true; - } - } - false - } - - /// Set the protocol version for the specified server. - pub async fn set_protocol_version(&self, server_addr: &str, version: f32) -> Result<(), String> { - let connections = self.connections.lock().await; - let con = connections - .iter() - .find(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - con.set_protocol_version(version).await; - - if let Some(sender) = &self.scripthash_notification_sender { - sender - .unbounded_send(ScripthashNotification::RefreshSubscriptions) - .map_err(|e| ERRL!("Failed sending scripthash message. {}", e))?; - } - - Ok(()) - } - - /// Reset the protocol version for the specified server. - pub async fn reset_protocol_version(&self, server_addr: &str) -> Result<(), String> { - let connections = self.connections.lock().await; - let con = connections - .iter() - .find(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - con.reset_protocol_version().await; - Ok(()) - } - - /// Get available protocol versions. - pub fn protocol_version(&self) -> &OrdRange { &self.protocol_version } - - /// Get block headers storage. 
- pub fn block_headers_storage(&self) -> &BlockHeaderStorage { &self.block_headers_storage } -} - -#[derive(Clone, Debug)] -pub struct ElectrumClient(pub Arc); -impl Deref for ElectrumClient { - type Target = ElectrumClientImpl; - fn deref(&self) -> &ElectrumClientImpl { &self.0 } -} - -const BLOCKCHAIN_HEADERS_SUB_ID: &str = "blockchain.headers.subscribe"; - -const BLOCKCHAIN_SCRIPTHASH_SUB_ID: &str = "blockchain.scripthash.subscribe"; - -impl UtxoJsonRpcClientInfo for ElectrumClient { - fn coin_name(&self) -> &str { self.coin_ticker.as_str() } -} - -impl JsonRpcClient for ElectrumClient { - fn version(&self) -> &'static str { "2.0" } - - fn next_id(&self) -> String { self.next_id.fetch_add(1, AtomicOrdering::Relaxed).to_string() } - - fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } - - fn transport(&self, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { - Box::new(electrum_request_multi(self.clone(), request).boxed().compat()) - } -} - -impl JsonRpcBatchClient for ElectrumClient {} - -impl JsonRpcMultiClient for ElectrumClient { - fn transport_exact(&self, to_addr: String, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { - Box::new(electrum_request_to(self.clone(), request, to_addr).boxed().compat()) - } -} - -impl ElectrumClient { - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-ping - pub fn server_ping(&self) -> RpcRes<()> { rpc_func!(self, "server.ping") } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-version - pub fn server_version( - &self, - server_address: &str, - client_name: &str, - version: &OrdRange, - ) -> RpcRes { - let protocol_version: Vec = version.flatten().into_iter().map(|v| format!("{}", v)).collect(); - rpc_func_from!(self, server_address, "server.version", client_name, protocol_version) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe - pub fn get_block_count_from(&self, server_address: &str) -> RpcRes { - Box::new( - rpc_func_from!(self, server_address, BLOCKCHAIN_HEADERS_SUB_ID) - .map(|r: ElectrumBlockHeader| r.block_height()), - ) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers - pub fn get_block_headers_from( - &self, - server_address: &str, - start_height: u64, - count: NonZeroU64, - ) -> RpcRes { - rpc_func_from!(self, server_address, "blockchain.block.headers", start_height, count) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent - /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 - /// We should remove them to build valid transactions - pub fn scripthash_list_unspent(&self, hash: &str) -> RpcRes> { - let request_fut = Box::new(rpc_func!(self, "blockchain.scripthash.listunspent", hash).and_then( - move |unspents: Vec| { - let mut map: HashMap<(H256Json, u32), bool> = HashMap::new(); - let unspents = unspents - .into_iter() - .filter(|unspent| match map.entry((unspent.tx_hash, unspent.tx_pos)) { - Entry::Occupied(_) => false, - Entry::Vacant(e) => { - e.insert(true); - true - }, - }) - .collect(); - Ok(unspents) - }, - )); - let arc = self.clone(); - let hash = hash.to_owned(); - let fut = async move { arc.list_unspent_concurrent_map.wrap_request(hash, request_fut).await }; - Box::new(fut.boxed().compat()) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent - /// It can return duplicates 
sometimes: https://github.com/artemii235/SuperNET/issues/269 - /// We should remove them to build valid transactions. - /// Please note the function returns `ScriptHashUnspents` elements in the same order in which they were requested. - pub fn scripthash_list_unspent_batch(&self, hashes: Vec) -> RpcRes> { - let requests = hashes - .iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.listunspent", hash)); - Box::new(self.batch_rpc(requests).map(move |unspents: Vec| { - unspents - .into_iter() - .map(|hash_unspents| { - hash_unspents - .into_iter() - .unique_by(|unspent| (unspent.tx_hash, unspent.tx_pos)) - .collect::>() - }) - .collect() - })) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history - pub fn scripthash_get_history(&self, hash: &str) -> RpcRes { - rpc_func!(self, "blockchain.scripthash.get_history", hash) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history - /// Requests history of the `hashes` in a batch and returns them in the same order they were requested. - pub fn scripthash_get_history_batch(&self, hashes: I) -> RpcRes> - where - I: IntoIterator, - { - let requests = hashes - .into_iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.get_history", hash)); - self.batch_rpc(requests) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory - pub fn scripthash_get_balance(&self, hash: &str) -> RpcRes { - let arc = self.clone(); - let hash = hash.to_owned(); - let fut = async move { - let request = rpc_func!(arc, "blockchain.scripthash.get_balance", &hash); - arc.get_balance_concurrent_map.wrap_request(hash, request).await - }; - Box::new(fut.boxed().compat()) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory - /// Requests balances in a batch and returns them in the same order they were requested. - pub fn scripthash_get_balances(&self, hashes: I) -> RpcRes> - where - I: IntoIterator, - { - let requests = hashes - .into_iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.get_balance", &hash)); - self.batch_rpc(requests) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe - pub fn blockchain_headers_subscribe(&self) -> RpcRes { - rpc_func!(self, BLOCKCHAIN_HEADERS_SUB_ID) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-broadcast - pub fn blockchain_transaction_broadcast(&self, tx: BytesJson) -> RpcRes { - rpc_func!(self, "blockchain.transaction.broadcast", tx) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-estimatefee - /// It is recommended to set n_blocks as low as possible. - /// However, in some cases, n_blocks = 1 leads to an unreasonably high fee estimation. 
- /// https://github.com/KomodoPlatform/atomicDEX-API/issues/656#issuecomment-743759659 - pub fn estimate_fee(&self, mode: &Option, n_blocks: u32) -> UtxoRpcFut { - match mode { - Some(m) => { - Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks, m).map_to_mm_fut(UtxoRpcError::from)) - }, - None => Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks).map_to_mm_fut(UtxoRpcError::from)), - } - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-header - pub fn blockchain_block_header(&self, height: u64) -> RpcRes { - rpc_func!(self, "blockchain.block.header", height) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers - pub fn blockchain_block_headers(&self, start_height: u64, count: NonZeroU64) -> RpcRes { - rpc_func!(self, "blockchain.block.headers", start_height, count) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get-merkle - pub fn blockchain_transaction_get_merkle(&self, txid: H256Json, height: u64) -> RpcRes { - rpc_func!(self, "blockchain.transaction.get_merkle", txid, height) - } - - // get_tx_height_from_rpc is costly since it loops through history after requesting the whole history of the script pubkey - // This method should always be used if the block headers are saved to the DB - async fn get_tx_height_from_storage(&self, tx: &UtxoTx) -> Result> { - let tx_hash = tx.hash().reversed(); - let blockhash = self.get_verbose_transaction(&tx_hash.into()).compat().await?.blockhash; - Ok(self - .block_headers_storage() - .get_block_height_by_hash(blockhash.into()) - .await? - .ok_or_else(|| { - GetTxHeightError::HeightNotFound(format!( - "Transaction block header is not found in storage for {}", - self.0.coin_ticker - )) - })? - .try_into()?) - } - - // get_tx_height_from_storage is always preferred to be used instead of this, but if there is no headers in storage (storing headers is not enabled) - // this function can be used instead - async fn get_tx_height_from_rpc(&self, tx: &UtxoTx) -> Result { - for output in tx.outputs.clone() { - let script_pubkey_str = hex::encode(electrum_script_hash(&output.script_pubkey)); - if let Ok(history) = self.scripthash_get_history(script_pubkey_str.as_str()).compat().await { - if let Some(item) = history - .into_iter() - .find(|item| item.tx_hash.reversed() == H256Json(*tx.hash()) && item.height > 0) - { - return Ok(item.height as u64); - } - } - } - Err(GetTxHeightError::HeightNotFound(format!( - "Couldn't find height through electrum for {}", - self.coin_ticker - ))) - } - - async fn block_header_from_storage(&self, height: u64) -> Result> { - self.block_headers_storage() - .get_block_header(height) - .await? 
- .ok_or_else(|| { - GetBlockHeaderError::Internal(format!("Header not found in storage for {}", self.coin_ticker)).into() - }) - } - - async fn block_header_from_storage_or_rpc(&self, height: u64) -> Result> { - match self.block_header_from_storage(height).await { - Ok(h) => Ok(h), - Err(_) => Ok(deserialize( - self.blockchain_block_header(height).compat().await?.as_slice(), - )?), - } - } - - pub async fn get_confirmed_tx_info_from_rpc( - &self, - tx: &UtxoTx, - ) -> Result { - let height = self.get_tx_height_from_rpc(tx).await?; - - let merkle_branch = self - .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) - .compat() - .await?; - - let header = deserialize(self.blockchain_block_header(height).compat().await?.as_slice())?; - - Ok(ConfirmedTransactionInfo { - tx: tx.clone(), - header, - index: merkle_branch.pos as u64, - height, - }) - } - - pub async fn get_merkle_and_validated_header( - &self, - tx: &UtxoTx, - ) -> Result<(TxMerkleBranch, BlockHeader, u64), MmError> { - let height = self.get_tx_height_from_storage(tx).await?; - - let merkle_branch = self - .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) - .compat() - .await - .map_to_mm(|err| SPVError::UnableToGetMerkle { - coin: self.coin_ticker.clone(), - err: err.to_string(), - })?; - - let header = self.block_header_from_storage(height).await?; - - Ok((merkle_branch, header, height)) - } -} - -#[cfg_attr(test, mockable)] -impl ElectrumClient { - pub fn retrieve_headers_from( - &self, - server_address: &str, - from_height: u64, - to_height: u64, - ) -> UtxoRpcFut<(HashMap, Vec)> { - let coin_name = self.coin_ticker.clone(); - if from_height == 0 || to_height < from_height { - return Box::new(futures01::future::err( - UtxoRpcError::Internal("Invalid values for from/to parameters".to_string()).into(), - )); - } - let count: NonZeroU64 = match (to_height - from_height + 1).try_into() { - Ok(c) => c, - Err(e) => return Box::new(futures01::future::err(UtxoRpcError::Internal(e.to_string()).into())), - }; - Box::new( - self.get_block_headers_from(server_address, from_height, count) - .map_to_mm_fut(UtxoRpcError::from) - .and_then(move |headers| { - let (block_registry, block_headers) = { - if headers.count == 0 { - return MmError::err(UtxoRpcError::Internal("No headers available".to_string())); - } - let len = CompactInteger::from(headers.count); - let mut serialized = serialize(&len).take(); - serialized.extend(headers.hex.0.into_iter()); - drop_mutability!(serialized); - let mut reader = - Reader::new_with_coin_variant(serialized.as_slice(), coin_name.as_str().into()); - let maybe_block_headers = reader.read_list::(); - let block_headers = match maybe_block_headers { - Ok(headers) => headers, - Err(e) => return MmError::err(UtxoRpcError::InvalidResponse(format!("{:?}", e))), - }; - let mut block_registry: HashMap = HashMap::with_capacity(block_headers.len()); - let mut starting_height = from_height; - for block_header in &block_headers { - block_registry.insert(starting_height, block_header.clone()); - starting_height += 1; - } - (block_registry, block_headers) - }; - Ok((block_registry, block_headers)) - }), - ) - } - - pub(crate) fn get_servers_with_latest_block_count(&self) -> UtxoRpcFut<(Vec, u64)> { - let selfi = self.clone(); - let fut = async move { - let connections = selfi.connections.lock().await; - let futures = connections - .iter() - .map(|connection| { - let addr = connection.addr.clone(); - selfi - .get_block_count_from(&addr) - .map(|response| (addr, response)) - .compat() - 
}) - .collect::>(); - drop(connections); - - let responses = join_all(futures).await; - - // First, we use filter_map to get rid of any errors and collect the - // server addresses and block counts into two vectors - let (responding_servers, block_counts_from_all_servers): (Vec<_>, Vec<_>) = - responses.clone().into_iter().filter_map(|res| res.ok()).unzip(); - - // Next, we use max to find the maximum block count from all servers - if let Some(max_block_count) = block_counts_from_all_servers.clone().iter().max() { - // Then, we use filter and collect to get the servers that have the maximum block count - let servers_with_max_count: Vec<_> = responding_servers - .into_iter() - .zip(block_counts_from_all_servers) - .filter(|(_, count)| count == max_block_count) - .map(|(addr, _)| addr) - .collect(); - - // Finally, we return a tuple of servers with max count and the max count - return Ok((servers_with_max_count, *max_block_count)); - } - - Err(MmError::new(UtxoRpcError::Internal(format!( - "Couldn't get block count from any server for {}, responses: {:?}", - &selfi.coin_ticker, responses - )))) - }; - - Box::new(fut.boxed().compat()) - } -} - -// if mockable is placed before async_trait there is `munmap_chunk(): invalid pointer` error on async fn mocking attempt -#[async_trait] -#[cfg_attr(test, mockable)] -impl UtxoRpcClientOps for ElectrumClient { - fn list_unspent(&self, address: &Address, _decimals: u8) -> UtxoRpcFut> { - let mut output_scripts = vec![try_f!(output_script(address))]; - - // If the plain pubkey is available, fetch the UTXOs found in P2PK outputs as well (if any). - if let Some(pubkey) = address.pubkey() { - let p2pk_output_script = output_script_p2pk(pubkey); - output_scripts.push(p2pk_output_script); - } - - let this = self.clone(); - let fut = async move { - let hashes = output_scripts - .iter() - .map(|s| hex::encode(electrum_script_hash(s))) - .collect(); - let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; - - let unspents = unspents - .into_iter() - .zip(output_scripts) - .flat_map(|(unspents, output_script)| { - unspents - .into_iter() - .map(move |unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) - }) - .collect(); - Ok(unspents) - }; - - Box::new(fut.boxed().compat()) - } - - fn list_unspent_group(&self, addresses: Vec
, _decimals: u8) -> UtxoRpcFut { - let output_scripts = try_f!(addresses - .iter() - .map(output_script) - .collect::, keys::Error>>()); - - let this = self.clone(); - let fut = async move { - let hashes = output_scripts - .iter() - .map(|s| hex::encode(electrum_script_hash(s))) - .collect(); - let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; - - let unspents: Vec> = unspents - .into_iter() - .zip(output_scripts) - .map(|(unspents, output_script)| { - unspents - .into_iter() - .map(|unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) - .collect() - }) - .collect(); - - let unspent_map = addresses - .into_iter() - // `scripthash_list_unspent_batch` returns `ScriptHashUnspents` elements in the same order in which they were requested. - // So we can zip `addresses` and `unspents` into one iterator. - .zip(unspents) - .collect(); - Ok(unspent_map) - }; - Box::new(fut.boxed().compat()) - } - - fn send_transaction(&self, tx: &UtxoTx) -> UtxoRpcFut { - let bytes = if tx.has_witness() { - BytesJson::from(serialize_with_flags(tx, SERIALIZE_TRANSACTION_WITNESS)) - } else { - BytesJson::from(serialize(tx)) - }; - Box::new( - self.blockchain_transaction_broadcast(bytes) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut { - Box::new( - self.blockchain_transaction_broadcast(tx) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn blockchain_scripthash_subscribe(&self, scripthash: String) -> UtxoRpcFut { - Box::new(rpc_func!(self, BLOCKCHAIN_SCRIPTHASH_SUB_ID, scripthash).map_to_mm_fut(UtxoRpcError::from)) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// returns transaction bytes by default - fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut { - let verbose = false; - Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// returns verbose transaction by default - fn get_verbose_transaction(&self, txid: &H256Json) -> UtxoRpcFut { - let verbose = true; - Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// Returns verbose transactions in a batch. - fn get_verbose_transactions(&self, tx_ids: &[H256Json]) -> UtxoRpcFut> { - let verbose = true; - let requests = tx_ids - .iter() - .map(|txid| rpc_req!(self, "blockchain.transaction.get", txid, verbose)); - Box::new(self.batch_rpc(requests).map_to_mm_fut(UtxoRpcError::from)) - } - - fn get_block_count(&self) -> UtxoRpcFut { - Box::new( - self.blockchain_headers_subscribe() - .map(|r| r.block_height()) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn display_balance(&self, address: Address, decimals: u8) -> RpcRes { - let output_script = try_f!(output_script(&address).map_err(|err| JsonRpcError::new( - UtxoJsonRpcClientInfo::client_info(self), - rpc_req!(self, "blockchain.scripthash.get_balance").into(), - JsonRpcErrorType::Internal(err.to_string()) - ))); - let mut hashes = vec![hex::encode(electrum_script_hash(&output_script))]; - - // If the plain pubkey is available, fetch the balance found in P2PK output as well (if any). 
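        // NOTE: the scripthashes sent to Electrum are derived as the ElectrumX protocol defines them:
        // SHA-256 of the raw output script, hex-encoded in reversed (little-endian) byte order.
        // A minimal sketch of that derivation (illustrative only; the hashing details in this crate
        // live in the `electrum_script_hash` helper, and `Sha256` stands for any SHA-256 implementation):
        //
        //     fn electrum_scripthash_hex(script: &[u8]) -> String {
        //         let mut digest: [u8; 32] = Sha256::digest(script).into();
        //         digest.reverse(); // Electrum expects the hash in reversed byte order
        //         hex::encode(digest)
        //     }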
-        if let Some(pubkey) = address.pubkey() {
-            let p2pk_output_script = output_script_p2pk(pubkey);
-            hashes.push(hex::encode(electrum_script_hash(&p2pk_output_script)));
-        }
-
-        let this = self.clone();
-        let fut = async move {
-            Ok(this
-                .scripthash_get_balances(hashes)
-                .compat()
-                .await?
-                .into_iter()
-                .fold(BigDecimal::from(0), |sum, electrum_balance| {
-                    sum + electrum_balance.to_big_decimal(decimals)
-                }))
-        };
-        Box::new(fut.boxed().compat())
-    }
-
-    fn display_balances(&self, addresses: Vec<Address>
, decimals: u8) -> UtxoRpcFut> { - let this = self.clone(); - let fut = async move { - let hashes = addresses - .iter() - .map(|address| { - let output_script = output_script(address)?; - let hash = electrum_script_hash(&output_script); - - Ok(hex::encode(hash)) - }) - .collect::, keys::Error>>()?; - - let electrum_balances = this.scripthash_get_balances(hashes).compat().await?; - let balances = electrum_balances - .into_iter() - // `scripthash_get_balances` returns `ElectrumBalance` elements in the same order in which they were requested. - // So we can zip `addresses` and the balances into one iterator. - .zip(addresses) - .map(|(electrum_balance, address)| (address, electrum_balance.to_big_decimal(decimals))) - .collect(); - Ok(balances) - }; - - Box::new(fut.boxed().compat()) - } - - fn estimate_fee_sat( - &self, - decimals: u8, - _fee_method: &EstimateFeeMethod, - mode: &Option, - n_blocks: u32, - ) -> UtxoRpcFut { - Box::new(self.estimate_fee(mode, n_blocks).map(move |fee| { - if fee > 0.00001 { - (fee * 10.0_f64.powf(decimals as f64)) as u64 - } else { - 1000 - } - })) - } - - fn get_relay_fee(&self) -> RpcRes { rpc_func!(self, "blockchain.relayfee") } - - fn find_output_spend( - &self, - tx_hash: H256, - script_pubkey: &[u8], - vout: usize, - _from_block: BlockHashOrHeight, - tx_hash_algo: TxHashAlgo, - ) -> Box, Error = String> + Send> { - let selfi = self.clone(); - let script_hash = hex::encode(electrum_script_hash(script_pubkey)); - let fut = async move { - let history = try_s!(selfi.scripthash_get_history(&script_hash).compat().await); - - if history.len() < 2 { - return Ok(None); - } - - for item in history.iter() { - let transaction = try_s!(selfi.get_transaction_bytes(&item.tx_hash).compat().await); - - let mut maybe_spend_tx: UtxoTx = - try_s!(deserialize(transaction.as_slice()).map_err(|e| ERRL!("{:?}", e))); - maybe_spend_tx.tx_hash_algo = tx_hash_algo; - drop_mutability!(maybe_spend_tx); - - for (index, input) in maybe_spend_tx.inputs.iter().enumerate() { - if input.previous_output.hash == tx_hash && input.previous_output.index == vout as u32 { - return Ok(Some(SpentOutputInfo { - input: input.clone(), - input_index: index, - spending_tx: maybe_spend_tx, - spent_in_block: BlockHashOrHeight::Height(item.height), - })); - } - } - } - Ok(None) - }; - Box::new(fut.boxed().compat()) - } - - fn get_median_time_past( - &self, - starting_block: u64, - count: NonZeroU64, - coin_variant: CoinVariant, - ) -> UtxoRpcFut { - let from = if starting_block <= count.get() { - 0 - } else { - starting_block - count.get() + 1 - }; - Box::new( - self.blockchain_block_headers(from, count) - .map_to_mm_fut(UtxoRpcError::from) - .and_then(|res| { - if res.count == 0 { - return MmError::err(UtxoRpcError::InvalidResponse("Server returned zero count".to_owned())); - } - let len = CompactInteger::from(res.count); - let mut serialized = serialize(&len).take(); - serialized.extend(res.hex.0.into_iter()); - let mut reader = Reader::new_with_coin_variant(serialized.as_slice(), coin_variant); - let headers = reader.read_list::()?; - let mut timestamps: Vec<_> = headers.into_iter().map(|block| block.time).collect(); - // can unwrap because count is non zero - Ok(median(timestamps.as_mut_slice()).unwrap()) - }), - ) - } - - async fn get_block_timestamp(&self, height: u64) -> Result> { - Ok(self.block_header_from_storage_or_rpc(height).await?.time as u64) - } -} - -#[cfg_attr(test, mockable)] -impl ElectrumClientImpl { - pub fn new( - coin_ticker: String, - event_handlers: Vec, - block_headers_storage: 
BlockHeaderStorage, - abortable_system: AbortableQueue, - negotiate_version: bool, - scripthash_notification_sender: ScripthashNotificationSender, - ) -> ElectrumClientImpl { - let protocol_version = OrdRange::new(1.2, 1.4).unwrap(); - ElectrumClientImpl { - coin_ticker, - connections: AsyncMutex::new(vec![]), - next_id: 0.into(), - event_handlers, - protocol_version, - get_balance_concurrent_map: ConcurrentRequestMap::new(), - list_unspent_concurrent_map: ConcurrentRequestMap::new(), - block_headers_storage, - abortable_system, - negotiate_version, - scripthash_notification_sender, - } - } - - #[cfg(test)] - pub fn with_protocol_version( - coin_ticker: String, - event_handlers: Vec, - protocol_version: OrdRange, - block_headers_storage: BlockHeaderStorage, - abortable_system: AbortableQueue, - scripthash_notification_sender: ScripthashNotificationSender, - ) -> ElectrumClientImpl { - ElectrumClientImpl { - protocol_version, - ..ElectrumClientImpl::new( - coin_ticker, - event_handlers, - block_headers_storage, - abortable_system, - false, - scripthash_notification_sender, - ) - } - } -} - -/// Helper function casting mpsc::Receiver as Stream. -fn rx_to_stream(rx: mpsc::Receiver>) -> impl Stream, Error = io::Error> { - rx.map_err(|_| panic!("errors not possible on rx")) -} - -async fn electrum_process_json( - raw_json: Json, - arc: &JsonRpcPendingRequestsShared, - scripthash_notification_sender: &ScripthashNotificationSender, -) { - // detect if we got standard JSONRPC response or subscription response as JSONRPC request - #[derive(Deserialize)] - #[serde(untagged)] - enum ElectrumRpcResponseEnum { - /// The subscription response as JSONRPC request. - /// - /// NOTE Because JsonRpcResponse uses default values for each of its field, - /// this variant has to stay at top in this enumeration to be properly deserialized - /// from serde. - SubscriptionNotification(JsonRpcRequest), - /// The standard JSONRPC single response. - SingleResponse(JsonRpcResponse), - /// The batch of standard JSONRPC responses. - BatchResponses(JsonRpcBatchResponse), - } - - let response: ElectrumRpcResponseEnum = match json::from_value(raw_json) { - Ok(res) => res, - Err(e) => { - error!("{}", e); - return; - }, - }; - - let response = match response { - ElectrumRpcResponseEnum::SingleResponse(single) => JsonRpcResponseEnum::Single(single), - ElectrumRpcResponseEnum::BatchResponses(batch) => JsonRpcResponseEnum::Batch(batch), - ElectrumRpcResponseEnum::SubscriptionNotification(req) => { - let id = match req.method.as_ref() { - BLOCKCHAIN_HEADERS_SUB_ID => BLOCKCHAIN_HEADERS_SUB_ID, - BLOCKCHAIN_SCRIPTHASH_SUB_ID => { - let scripthash = match req.params.first() { - Some(t) => t.as_str().unwrap_or_default(), - None => { - debug!("Notification must contain the scripthash value."); - return; - }, - }; - - if let Some(sender) = scripthash_notification_sender { - debug!("Sending scripthash message"); - if let Err(e) = sender.unbounded_send(ScripthashNotification::Triggered(scripthash.to_string())) - { - error!("Failed sending scripthash message. 
{e}"); - return; - }; - }; - BLOCKCHAIN_SCRIPTHASH_SUB_ID - }, - _ => { - error!("Couldn't get id of request {:?}", req); - return; - }, - }; - JsonRpcResponseEnum::Single(JsonRpcResponse { - id: id.into(), - jsonrpc: "2.0".into(), - result: req.params[0].clone(), - error: Json::Null, - }) - }, - }; - - // the corresponding sender may not exist, receiver may be dropped - // these situations are not considered as errors so we just silently skip them - let mut pending = arc.lock().await; - if let Some(tx) = pending.remove(&response.rpc_id()) { - tx.send(response).ok(); - } -} - -async fn electrum_process_chunk( - chunk: &[u8], - arc: &JsonRpcPendingRequestsShared, - scripthash_notification_sender: ScripthashNotificationSender, -) { - // we should split the received chunk because we can get several responses in 1 chunk. - let split = chunk.split(|item| *item == b'\n'); - for chunk in split { - // split returns empty slice if it ends with separator which is our case - if !chunk.is_empty() { - let raw_json: Json = match json::from_slice(chunk) { - Ok(json) => json, - Err(e) => { - error!("{}", e); - return; - }, - }; - electrum_process_json(raw_json, arc, &scripthash_notification_sender).await - } - } -} - -fn increase_delay(delay: &AtomicU64) { - if delay.load(AtomicOrdering::Relaxed) < 60 { - delay.fetch_add(5, AtomicOrdering::Relaxed); - } -} - -macro_rules! try_loop { - ($e:expr, $addr: ident, $delay: ident) => { - match $e { - Ok(res) => res, - Err(e) => { - error!("{:?} error {:?}", $addr, e); - increase_delay(&$delay); - continue; - }, - } - }; -} - -/// The enum wrapping possible variants of underlying Streams -#[cfg(not(target_arch = "wasm32"))] -#[allow(clippy::large_enum_variant)] -enum ElectrumStream { - Tcp(TcpStream), - Tls(TlsStream), -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsRef for ElectrumStream { - fn as_ref(&self) -> &TcpStream { - match self { - ElectrumStream::Tcp(stream) => stream, - ElectrumStream::Tls(stream) => stream.get_ref().0, - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsyncRead for ElectrumStream { - fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), - ElectrumStream::Tls(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsyncWrite for ElectrumStream { - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), - ElectrumStream::Tls(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), - ElectrumStream::Tls(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), - } - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), - ElectrumStream::Tls(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), - } - } -} - -const ELECTRUM_TIMEOUT: u64 = 60; - -async fn electrum_last_chunk_loop(last_chunk: Arc) { - loop { - Timer::sleep(ELECTRUM_TIMEOUT as f64).await; - let last = (last_chunk.load(AtomicOrdering::Relaxed) / 1000) as f64; - if now_float() - last > ELECTRUM_TIMEOUT as f64 { - 
warn!( - "Didn't receive any data since {}. Shutting down the connection.", - last as i64 - ); - break; - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -fn rustls_client_config(unsafe_conf: bool) -> Arc { - let mut cert_store = RootCertStore::empty(); - - cert_store.add_trust_anchors( - TLS_SERVER_ROOTS - .iter() - .map(|ta| OwnedTrustAnchor::from_subject_spki_name_constraints(ta.subject, ta.spki, ta.name_constraints)), - ); - - let mut tls_config = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(cert_store) - .with_no_client_auth(); - - if unsafe_conf { - tls_config - .dangerous() - .set_certificate_verifier(Arc::new(NoCertificateVerification {})); - } - Arc::new(tls_config) -} - -#[cfg(not(target_arch = "wasm32"))] -lazy_static! { - static ref SAFE_TLS_CONFIG: Arc = rustls_client_config(false); - static ref UNSAFE_TLS_CONFIG: Arc = rustls_client_config(true); -} - -#[cfg(not(target_arch = "wasm32"))] -async fn connect_loop( - config: ElectrumConfig, - addr: String, - responses: JsonRpcPendingRequestsShared, - connection_tx: Arc>>>>, - event_handlers: Vec, - scripthash_notification_sender: ScripthashNotificationSender, - _spawner: Spawner, -) -> Result<(), ()> { - let delay = Arc::new(AtomicU64::new(0)); - - loop { - let current_delay = delay.load(AtomicOrdering::Relaxed); - if current_delay > 0 { - Timer::sleep(current_delay as f64).await; - }; - - let socket_addr = addr_to_socket_addr(&addr).map_err(|e| { - error!("{:?} error {:?}", addr, e); - })?; - - let connect_f = match config.clone() { - ElectrumConfig::TCP => Either::Left(TcpStream::connect(&socket_addr).map_ok(ElectrumStream::Tcp)), - ElectrumConfig::SSL { - dns_name, - skip_validation, - } => { - let tls_connector = if skip_validation { - TlsConnector::from(UNSAFE_TLS_CONFIG.clone()) - } else { - TlsConnector::from(SAFE_TLS_CONFIG.clone()) - }; - // The address should always be correct since we checked it beforehand in initializaiton. 
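        // NOTE (illustrative): `server_name_from_domain` is expected to parse the configured domain
        // into the TLS server name used for SNI and certificate validation. Assuming the rustls 0.21
        // API that the surrounding TLS types (RootCertStore, TlsConnector) come from, such a helper
        // could be a thin wrapper along these lines:
        //
        //     fn server_name_from_domain(domain: &str) -> Result<rustls::ServerName, String> {
        //         rustls::ServerName::try_from(domain).map_err(|e| format!("Invalid DNS name {}: {:?}", domain, e))
        //     }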
- let dns = server_name_from_domain(dns_name.as_str()).map_err(|e| { - error!("{:?} error {:?}", addr, e); - })?; - - Either::Right( - TcpStream::connect(&socket_addr) - .and_then(move |stream| tls_connector.connect(dns, stream).map_ok(ElectrumStream::Tls)), - ) - }, - }; - - let stream = try_loop!(connect_f.await, addr, delay); - try_loop!(stream.as_ref().set_nodelay(true), addr, delay); - info!("Electrum client connected to {}", addr); - try_loop!(event_handlers.on_connected(addr.clone()), addr, delay); - let last_chunk = Arc::new(AtomicU64::new(now_ms())); - let mut last_chunk_f = electrum_last_chunk_loop(last_chunk.clone()).boxed().fuse(); - - let (tx, rx) = mpsc::channel(0); - *connection_tx.lock().await = Some(tx); - let rx = rx_to_stream(rx).inspect(|data| { - // measure the length of each sent packet - event_handlers.on_outgoing_request(data); - }); - - let (read, mut write) = tokio::io::split(stream); - let recv_f = { - let delay = delay.clone(); - let addr = addr.clone(); - let responses = responses.clone(); - let scripthash_notification_sender = scripthash_notification_sender.clone(); - let event_handlers = event_handlers.clone(); - async move { - let mut buffer = String::with_capacity(1024); - let mut buf_reader = BufReader::new(read); - loop { - match buf_reader.read_line(&mut buffer).await { - Ok(c) => { - if c == 0 { - info!("EOF from {}", addr); - break; - } - // reset the delay if we've connected successfully and only if we received some data from connection - delay.store(0, AtomicOrdering::Relaxed); - }, - Err(e) => { - error!("Error on read {} from {}", e, addr); - break; - }, - }; - // measure the length of each incoming packet - event_handlers.on_incoming_response(buffer.as_bytes()); - last_chunk.store(now_ms(), AtomicOrdering::Relaxed); - - electrum_process_chunk(buffer.as_bytes(), &responses, scripthash_notification_sender.clone()).await; - buffer.clear(); - } - } - }; - let mut recv_f = Box::pin(recv_f).fuse(); - - let send_f = { - let addr = addr.clone(); - let mut rx = rx.compat(); - async move { - while let Some(Ok(bytes)) = rx.next().await { - if let Err(e) = write.write_all(&bytes).await { - error!("Write error {} to {}", e, addr); - } - } - } - }; - let mut send_f = Box::pin(send_f).fuse(); - macro_rules! reset_tx_and_continue { - () => { - info!("{} connection dropped", addr); - event_handlers.on_disconnected(addr.clone()).error_log(); - *connection_tx.lock().await = None; - increase_delay(&delay); - continue; - }; - } - - select! { - _last_chunk = last_chunk_f => { reset_tx_and_continue!(); }, - _recv = recv_f => { reset_tx_and_continue!(); }, - _send = send_f => { reset_tx_and_continue!(); }, - } - } -} - -#[cfg(target_arch = "wasm32")] -async fn connect_loop( - _config: ElectrumConfig, - addr: String, - responses: JsonRpcPendingRequestsShared, - connection_tx: Arc>>>>, - event_handlers: Vec, - scripthash_notification_sender: ScripthashNotificationSender, - spawner: Spawner, -) -> Result<(), ()> { - use std::sync::atomic::AtomicUsize; - - lazy_static! 
{ - static ref CONN_IDX: Arc = Arc::new(AtomicUsize::new(0)); - } - - use mm2_net::wasm::wasm_ws::ws_transport; - - let delay = Arc::new(AtomicU64::new(0)); - loop { - let current_delay = delay.load(AtomicOrdering::Relaxed); - if current_delay > 0 { - Timer::sleep(current_delay as f64).await; - } - - let conn_idx = CONN_IDX.fetch_add(1, AtomicOrdering::Relaxed); - let (mut transport_tx, mut transport_rx) = - try_loop!(ws_transport(conn_idx, &addr, &spawner).await, addr, delay); - - info!("Electrum client connected to {}", addr); - try_loop!(event_handlers.on_connected(addr.clone()), addr, delay); - - let last_chunk = Arc::new(AtomicU64::new(now_ms())); - let mut last_chunk_fut = electrum_last_chunk_loop(last_chunk.clone()).boxed().fuse(); - - let (outgoing_tx, outgoing_rx) = mpsc::channel(0); - *connection_tx.lock().await = Some(outgoing_tx); - - let incoming_fut = { - let delay = delay.clone(); - let addr = addr.clone(); - let responses = responses.clone(); - let scripthash_notification_sender = scripthash_notification_sender.clone(); - let event_handlers = event_handlers.clone(); - async move { - while let Some(incoming_res) = transport_rx.next().await { - last_chunk.store(now_ms(), AtomicOrdering::Relaxed); - match incoming_res { - Ok(incoming_json) => { - // reset the delay if we've connected successfully and only if we received some data from connection - delay.store(0, AtomicOrdering::Relaxed); - // measure the length of each incoming packet - let incoming_str = incoming_json.to_string(); - event_handlers.on_incoming_response(incoming_str.as_bytes()); - - electrum_process_json(incoming_json, &responses, &scripthash_notification_sender).await; - }, - Err(e) => { - error!("{} error: {:?}", addr, e); - }, - } - } - } - }; - let mut incoming_fut = Box::pin(incoming_fut).fuse(); - - let outgoing_fut = { - let addr = addr.clone(); - let mut outgoing_rx = rx_to_stream(outgoing_rx).compat(); - let event_handlers = event_handlers.clone(); - async move { - while let Some(Ok(data)) = outgoing_rx.next().await { - let raw_json: Json = match json::from_slice(&data) { - Ok(js) => js, - Err(e) => { - error!("Error {} deserializing the outgoing data: {:?}", e, data); - continue; - }, - }; - // measure the length of each sent packet - event_handlers.on_outgoing_request(&data); - - if let Err(e) = transport_tx.send(raw_json).await { - error!("Error sending to {}: {:?}", addr, e); - } - } - } - }; - let mut outgoing_fut = Box::pin(outgoing_fut).fuse(); - - macro_rules! reset_tx_and_continue { - () => { - info!("{} connection dropped", addr); - *connection_tx.lock().await = None; - event_handlers.on_disconnected(addr.clone()).error_log(); - increase_delay(&delay); - continue; - }; - } - - select! { - _last_chunk = last_chunk_fut => { reset_tx_and_continue!(); }, - _incoming = incoming_fut => { reset_tx_and_continue!(); }, - _outgoing = outgoing_fut => { reset_tx_and_continue!(); }, - } - } -} - -/// Builds up the electrum connection, spawns endless loop that attempts to reconnect to the server -/// in case of connection errors. -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. 
-fn electrum_connect( - addr: String, - config: ElectrumConfig, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> ElectrumConnection { - let responses = Arc::new(AsyncMutex::new(JsonRpcPendingRequests::default())); - let tx = Arc::new(AsyncMutex::new(None)); - - let spawner = abortable_system.weak_spawner(); - let fut = connect_loop( - config.clone(), - addr.clone(), - responses.clone(), - tx.clone(), - event_handlers, - scripthash_notification_sender.clone(), - spawner.clone(), - ) - .then(|_| futures::future::ready(())); - - spawner.spawn(fut); - ElectrumConnection { - addr, - config, - tx, - responses, - protocol_version: AsyncMutex::new(None), - _abortable_system: abortable_system, - } -} - -/// # Important -/// `electrum_request` should always return [`JsonRpcErrorType::Transport`] error. -fn electrum_request( - mut req_json: String, - rpc_id: JsonRpcId, - tx: mpsc::Sender>, - responses: JsonRpcPendingRequestsShared, - timeout: u64, -) -> Box + Send + 'static> { - let send_fut = async move { - #[cfg(not(target_arch = "wasm"))] - { - // Electrum request and responses must end with \n - // https://electrumx.readthedocs.io/en/latest/protocol-basics.html#message-stream - req_json.push('\n'); - } - let (req_tx, resp_rx) = async_oneshot::channel(); - responses.lock().await.insert(rpc_id, req_tx); - tx.send(req_json.into_bytes()) - .compat() - .await - .map_err(|err| JsonRpcErrorType::Transport(err.to_string()))?; - let resps = resp_rx.await.map_err(|e| JsonRpcErrorType::Transport(e.to_string()))?; - Ok(resps) - }; - let send_fut = send_fut - .boxed() - .timeout(Duration::from_secs(timeout)) - .compat() - .then(move |res| res.map_err(|err| JsonRpcErrorType::Transport(err.to_string()))?); - Box::new(send_fut) -} - fn address_balance_from_unspent_map(address: &Address, unspent_map: &UnspentMap, decimals: u8) -> BigDecimal { let unspents = match unspent_map.get(address) { Some(unspents) => unspents, diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs new file mode 100644 index 0000000000..ce0498cc31 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs @@ -0,0 +1,1068 @@ +use super::super::{BlockHashOrHeight, EstimateFeeMethod, EstimateFeeMode, SpentOutputInfo, UnspentInfo, UnspentMap, + UtxoJsonRpcClientInfo, UtxoRpcClientOps, UtxoRpcError, UtxoRpcFut}; +use super::connection::{ElectrumConnection, ElectrumConnectionErr, ElectrumConnectionSettings}; +use super::connection_manager::ConnectionManager; +use super::constants::{BLOCKCHAIN_HEADERS_SUB_ID, BLOCKCHAIN_SCRIPTHASH_SUB_ID, ELECTRUM_REQUEST_TIMEOUT, + NO_FORCE_CONNECT_METHODS, SEND_TO_ALL_METHODS}; +use super::electrum_script_hash; +use super::event_handlers::{ElectrumConnectionManagerNotifier, ElectrumScriptHashNotificationBridge}; +use super::rpc_responses::*; + +use crate::utxo::rpc_clients::ConcurrentRequestMap; +use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; +use crate::utxo::{output_script, output_script_p2pk, GetBlockHeaderError, GetConfirmedTxError, GetTxHeightError, + ScripthashNotification}; +use crate::RpcTransportEventHandler; +use crate::SharableRpcTransportEventHandler; +use chain::{BlockHeader, Transaction as UtxoTx, TxHashAlgo}; +use common::executor::abortable_queue::{AbortableQueue, WeakSpawner}; +use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcClient, JsonRpcError, JsonRpcErrorType, JsonRpcId, + JsonRpcMultiClient, 
JsonRpcRemoteAddr, JsonRpcRequest, JsonRpcRequestEnum, + JsonRpcResponseEnum, JsonRpcResponseFut, RpcRes}; +use common::log::warn; +use common::{median, OrdRange}; +use keys::hash::H256; +use keys::Address; +use mm2_err_handle::prelude::*; +use mm2_number::BigDecimal; +#[cfg(test)] use mocktopus::macros::*; +use rpc::v1::types::{Bytes as BytesJson, Transaction as RpcTransaction, H256 as H256Json}; +use serialization::{deserialize, serialize, serialize_with_flags, CoinVariant, CompactInteger, Reader, + SERIALIZE_TRANSACTION_WITNESS}; +use spv_validation::helpers_validation::SPVError; +use spv_validation::storage::BlockHeaderStorageOps; + +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::HashSet; +use std::convert::TryInto; +use std::fmt::Debug; +use std::iter::FromIterator; +use std::num::NonZeroU64; +use std::ops::Deref; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::sync::Arc; + +use async_trait::async_trait; +use futures::channel::mpsc::UnboundedSender; +use futures::compat::Future01CompatExt; +use futures::future::{join_all, FutureExt, TryFutureExt}; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use futures01::Future; +use itertools::Itertools; +use serde_json::{self as json, Value as Json}; + +type ElectrumTxHistory = Vec; +type ElectrumScriptHash = String; +type ScriptHashUnspents = Vec; + +#[derive(Debug)] +pub struct ElectrumClientSettings { + pub client_name: String, + pub servers: Vec, + pub coin_ticker: String, + pub negotiate_version: bool, + pub spawn_ping: bool, + /// Minimum number of connections to keep alive at all times (best effort). + pub min_connected: usize, + /// Maximum number of connections to keep alive at any time. + pub max_connected: usize, +} + +#[derive(Debug)] +pub struct ElectrumClientImpl { + client_name: String, + coin_ticker: String, + pub connection_manager: ConnectionManager, + next_id: AtomicU64, + negotiate_version: bool, + protocol_version: OrdRange, + get_balance_concurrent_map: ConcurrentRequestMap, + list_unspent_concurrent_map: ConcurrentRequestMap>, + block_headers_storage: BlockHeaderStorage, + /// Event handlers that are triggered on (dis)connection & transport events. They are wrapped + /// in an `Arc` since they are shared outside `ElectrumClientImpl`. They are handed to each active + /// `ElectrumConnection` to notify them about the events. + event_handlers: Arc>>, + pub scripthash_notification_sender: Option>, + abortable_system: AbortableQueue, +} + +#[cfg_attr(test, mockable)] +impl ElectrumClientImpl { + /// Returns a new instance of `ElectrumClientImpl`. + /// + /// This doesn't initialize the connection manager contained within `ElectrumClientImpl`. + /// Use `try_new_arc` to create an Arc-wrapped instance with an initialized connection manager. + fn try_new( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + mut event_handlers: Vec>, + scripthash_notification_sender: Option>, + ) -> Result { + // This is used for balance event streaming implementation for UTXOs. + // Will be used for sending scripthash messages to trigger re-connections, re-fetching the balances, etc. 
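+        // (docs sketch) For context, the `client_settings` consumed by this constructor could be
+        // built along these lines; the value is hypothetical, but the field names are the ones
+        // declared on `ElectrumClientSettings` above, and `servers` holds the per-server
+        // `ElectrumConnectionSettings` from the sibling `connection` module:
+        //
+        //     let settings = ElectrumClientSettings {
+        //         client_name: "example-client".into(),
+        //         servers: vec![/* one ElectrumConnectionSettings per server */],
+        //         coin_ticker: "KMD".into(),
+        //         negotiate_version: true,
+        //         spawn_ping: true,
+        //         min_connected: 1,
+        //         max_connected: 5,
+        //     };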
+ if let Some(scripthash_notification_sender) = scripthash_notification_sender.clone() { + event_handlers.push(Box::new(ElectrumScriptHashNotificationBridge { + scripthash_notification_sender, + })); + } + + let connection_manager = ConnectionManager::try_new( + client_settings.servers, + client_settings.spawn_ping, + (client_settings.min_connected, client_settings.max_connected), + &abortable_system, + )?; + + event_handlers.push(Box::new(ElectrumConnectionManagerNotifier { + connection_manager: connection_manager.clone(), + })); + + Ok(ElectrumClientImpl { + client_name: client_settings.client_name, + coin_ticker: client_settings.coin_ticker, + connection_manager, + next_id: 0.into(), + negotiate_version: client_settings.negotiate_version, + protocol_version: OrdRange::new(1.2, 1.4).unwrap(), + get_balance_concurrent_map: ConcurrentRequestMap::new(), + list_unspent_concurrent_map: ConcurrentRequestMap::new(), + block_headers_storage, + abortable_system, + scripthash_notification_sender, + event_handlers: Arc::new(event_handlers), + }) + } + + /// Create a new Electrum client instance. + /// This function initializes the connection manager and starts the connection process. + pub fn try_new_arc( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + event_handlers: Vec>, + scripthash_notification_sender: Option>, + ) -> Result, String> { + let client_impl = Arc::new(ElectrumClientImpl::try_new( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )?); + // Initialize the connection manager. + client_impl + .connection_manager + .initialize(Arc::downgrade(&client_impl)) + .map_err(|e| e.to_string())?; + + Ok(client_impl) + } + + /// Remove an Electrum connection and stop corresponding spawned actor. + pub fn remove_server(&self, server_addr: &str) -> Result, String> { + self.connection_manager + .remove_connection(server_addr) + .map_err(|err| err.to_string()) + } + + /// Check if all connections have been removed. + pub fn is_connections_pool_empty(&self) -> bool { self.connection_manager.is_connections_pool_empty() } + + /// Get available protocol versions. + pub fn protocol_version(&self) -> &OrdRange { &self.protocol_version } + + pub fn coin_ticker(&self) -> &str { &self.coin_ticker } + + /// Whether to negotiate the protocol version. + pub fn negotiate_version(&self) -> bool { self.negotiate_version } + + /// Get the event handlers. + pub fn event_handlers(&self) -> Arc>> { self.event_handlers.clone() } + + /// Sends a list of addresses through the scripthash notification sender to subscribe to their scripthash notifications. + pub fn subscribe_addresses(&self, addresses: HashSet
) -> Result<(), String> { + if let Some(sender) = &self.scripthash_notification_sender { + sender + .unbounded_send(ScripthashNotification::SubscribeToAddresses(addresses)) + .map_err(|e| ERRL!("Failed sending scripthash message. {}", e))?; + } + + Ok(()) + } + + /// Get block headers storage. + pub fn block_headers_storage(&self) -> &BlockHeaderStorage { &self.block_headers_storage } + + pub fn weak_spawner(&self) -> WeakSpawner { self.abortable_system.weak_spawner() } + + #[cfg(test)] + pub fn with_protocol_version( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + event_handlers: Vec>, + scripthash_notification_sender: Option>, + protocol_version: OrdRange, + ) -> Result, String> { + let client_impl = Arc::new(ElectrumClientImpl { + protocol_version, + ..ElectrumClientImpl::try_new( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )? + }); + // Initialize the connection manager. + client_impl + .connection_manager + .initialize(Arc::downgrade(&client_impl)) + .map_err(|e| e.to_string())?; + + Ok(client_impl) + } +} + +#[derive(Clone, Debug)] +pub struct ElectrumClient(pub Arc); + +impl Deref for ElectrumClient { + type Target = ElectrumClientImpl; + fn deref(&self) -> &ElectrumClientImpl { &self.0 } +} + +impl UtxoJsonRpcClientInfo for ElectrumClient { + fn coin_name(&self) -> &str { self.coin_ticker.as_str() } +} + +impl JsonRpcClient for ElectrumClient { + fn version(&self) -> &'static str { "2.0" } + + fn next_id(&self) -> u64 { self.next_id.fetch_add(1, AtomicOrdering::Relaxed) } + + fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } + + fn transport(&self, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { + Box::new(self.clone().electrum_request_multi(request).boxed().compat()) + } +} + +impl JsonRpcBatchClient for ElectrumClient {} + +impl JsonRpcMultiClient for ElectrumClient { + fn transport_exact(&self, to_addr: String, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { + Box::new( + self.clone() + .electrum_request_to(to_addr.clone(), request) + .map_ok(|response| (JsonRpcRemoteAddr(to_addr), response)) + .boxed() + .compat(), + ) + } +} + +#[cfg_attr(test, mockable)] +impl ElectrumClient { + pub fn try_new( + client_settings: ElectrumClientSettings, + event_handlers: Vec>, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + scripthash_notification_sender: Option>, + ) -> Result { + let client = ElectrumClient(ElectrumClientImpl::try_new_arc( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )?); + + Ok(client) + } + + /// Sends a JSONRPC request to all the connected servers. + /// + /// This method will block until a response is received from at least one server. + async fn electrum_request_multi( + self, + request: JsonRpcRequestEnum, + ) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { + // Whether to send the request to all active connections or not. + let send_to_all = matches!(request, JsonRpcRequestEnum::Single(ref req) if SEND_TO_ALL_METHODS.contains(&req.method.as_str())); + // Request id and serialized request. + let req_id = request.rpc_id(); + let request = json::to_string(&request).map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))?; + let request = (req_id, request); + // Use the active connections for this request. 
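+        // (docs note) Routing summary: methods listed in SEND_TO_ALL_METHODS are fanned out to every
+        // active connection at once, while all other requests are tried against the active connections
+        // one at a time. Only if that first pass fails (and the request is not a send-to-all one) does
+        // the client widen the attempt to all known connections with a higher concurrency (see the
+        // fallback branch below).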
+ let connections = self.connection_manager.get_active_connections(); + // Maximum number of connections to establish or use in request concurrently. Could be up to connections.len(). + let concurrency = if send_to_all { connections.len() } else { 1 }; + match self + .send_request_using(&request, connections, send_to_all, concurrency) + .await + { + Ok(response) => Ok(response), + // If we failed the request using only the active connections, try again using all connections. + Err(_) if !send_to_all => { + warn!( + "[coin={}] Failed to send the request using active connections, trying all connections.", + self.coin_ticker() + ); + let connections = self.connection_manager.get_all_connections(); + // At this point we should have all the connections disconnected since all + // the active connections failed (and we disconnected them in the process). + // So use a higher concurrency to speed up the response time. + // + // Note that a side effect of this is that we might break the `max_connected` threshold for + // a short time since the connection manager's background task will be trying to establish + // connections at the same time. This is not as bad though since the manager's background task + // tries connections sequentially and we are expected for finish much quicker due to parallelizing. + let concurrency = self.connection_manager.config().max_connected; + match self.send_request_using(&request, connections, false, concurrency).await { + Ok(response) => Ok(response), + Err(err_vec) => Err(JsonRpcErrorType::Internal(format!("All servers errored: {err_vec:?}"))), + } + }, + Err(e) => Err(JsonRpcErrorType::Internal(format!("All servers errored: {e:?}"))), + } + } + + /// Sends a JSONRPC request to a specific electrum server. + /// + /// This will try to wake up the server connection if it's not connected. + async fn electrum_request_to( + self, + to_addr: String, + request: JsonRpcRequestEnum, + ) -> Result { + // Whether to force the connection to be established (if not) before sending the request. + let force_connect = !matches!(request, JsonRpcRequestEnum::Single(ref req) if NO_FORCE_CONNECT_METHODS.contains(&req.method.as_str())); + let json = json::to_string(&request).map_err(|err| JsonRpcErrorType::InvalidRequest(err.to_string()))?; + + let connection = self + .connection_manager + .get_connection_by_address(&to_addr, force_connect) + .await + .map_err(|err| JsonRpcErrorType::Internal(err.to_string()))?; + + let response = connection + .electrum_request(json, request.rpc_id(), ELECTRUM_REQUEST_TIMEOUT) + .await; + // If the request was not forcefully connected, we shouldn't inform the connection manager that it's + // not needed anymore, as we didn't force spawn it in the first place. + // This fixes dropping the connection after the version check request, as we don't mark the connection + // maintained till after the version is checked. + if force_connect { + // Inform the connection manager that the connection was queried and no longer needed now. + self.connection_manager.not_needed(&to_addr); + } + + response + } + + /// Sends a JSONRPC request to all the given connections in parallel and returns + /// the first successful response if there is any, or a vector of errors otherwise. + /// + /// If `send_to_all` is set to `true`, we won't return on first successful response but + /// wait for all responses to come back first. 
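+    ///
+    /// For example, for non-subscription requests `electrum_request_multi` above first calls this
+    /// with only the active connections and `max_concurrency = 1`, and retries with every known
+    /// connection (and a higher concurrency) only if all of those attempts error out.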
+ async fn send_request_using( + &self, + request: &(JsonRpcId, String), + connections: Vec>, + send_to_all: bool, + max_concurrency: usize, + ) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), Vec<(JsonRpcRemoteAddr, JsonRpcErrorType)>> { + let max_concurrency = max_concurrency.max(1); + // Create the request + let chunked_requests = connections.chunks(max_concurrency).map(|chunk| { + FuturesUnordered::from_iter(chunk.iter().map(|connection| { + let client = self.clone(); + let req_id = request.0; + let req_json = request.1.clone(); + async move { + let connection_is_established = connection + // We first make sure that the connection loop is established before sending the request. + .establish_connection_loop(client) + .await + .map_err(|e| JsonRpcErrorType::Transport(format!("Failed to establish connection: {e:?}"))); + let response = match connection_is_established { + Ok(_) => { + // Perform the request. + connection + .electrum_request(req_json, req_id, ELECTRUM_REQUEST_TIMEOUT) + .await + }, + Err(e) => Err(e), + }; + (response, connection.clone()) + } + })) + }); + let client = self.clone(); + let mut final_response = None; + let mut errors = Vec::new(); + // Iterate over the request chunks sequentially. + for mut requests in chunked_requests { + // For each chunk, iterate over the requests in parallel. + while let Some((response, connection)) = requests.next().await { + let address = JsonRpcRemoteAddr(connection.address().to_string()); + match response { + Ok(response) => { + if final_response.is_none() { + final_response = Some((address, response)); + } + client.connection_manager.not_needed(connection.address()); + if !send_to_all && final_response.is_some() { + return Ok(final_response.unwrap()); + } + }, + Err(e) => { + warn!( + "[coin={}], Error while sending request to {address:?}: {e:?}", + client.coin_ticker() + ); + connection.disconnect(Some(ElectrumConnectionErr::Temporary(format!( + "Forcefully disconnected for erroring: {e:?}." 
+ )))); + client.event_handlers.on_disconnected(connection.address()).ok(); + errors.push((address, e)) + }, + } + } + } + final_response.ok_or(errors) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-ping + pub fn server_ping(&self) -> RpcRes<()> { rpc_func!(self, "server.ping") } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-version + pub fn server_version(&self, server_address: &str, version: &OrdRange) -> RpcRes { + let protocol_version: Vec = version.flatten().into_iter().map(|v| format!("{}", v)).collect(); + rpc_func_from!( + self, + server_address, + "server.version", + &self.client_name, + protocol_version + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe + pub fn get_block_count_from(&self, server_address: &str) -> RpcRes { + Box::new( + rpc_func_from!(self, server_address, BLOCKCHAIN_HEADERS_SUB_ID) + .map(|r: ElectrumBlockHeader| r.block_height()), + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers + pub fn get_block_headers_from( + &self, + server_address: &str, + start_height: u64, + count: NonZeroU64, + ) -> RpcRes { + rpc_func_from!(self, server_address, "blockchain.block.headers", start_height, count) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent + /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 + /// We should remove them to build valid transactions + pub fn scripthash_list_unspent(&self, hash: &str) -> RpcRes> { + let request_fut = Box::new(rpc_func!(self, "blockchain.scripthash.listunspent", hash).and_then( + move |unspents: Vec| { + let mut map: HashMap<(H256Json, u32), bool> = HashMap::new(); + let unspents = unspents + .into_iter() + .filter(|unspent| match map.entry((unspent.tx_hash, unspent.tx_pos)) { + Entry::Occupied(_) => false, + Entry::Vacant(e) => { + e.insert(true); + true + }, + }) + .collect(); + Ok(unspents) + }, + )); + let arc = self.clone(); + let hash = hash.to_owned(); + let fut = async move { arc.list_unspent_concurrent_map.wrap_request(hash, request_fut).await }; + Box::new(fut.boxed().compat()) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent + /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 + /// We should remove them to build valid transactions. + /// Please note the function returns `ScriptHashUnspents` elements in the same order in which they were requested. + pub fn scripthash_list_unspent_batch(&self, hashes: Vec) -> RpcRes> { + let requests = hashes + .iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.listunspent", hash)); + Box::new(self.batch_rpc(requests).map(move |unspents: Vec| { + unspents + .into_iter() + .map(|hash_unspents| { + hash_unspents + .into_iter() + .unique_by(|unspent| (unspent.tx_hash, unspent.tx_pos)) + .collect::>() + }) + .collect() + })) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history + pub fn scripthash_get_history(&self, hash: &str) -> RpcRes { + rpc_func!(self, "blockchain.scripthash.get_history", hash) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history + /// Requests history of the `hashes` in a batch and returns them in the same order they were requested. 
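+    /// A hypothetical call site (sketch only; `hash_a` and `hash_b` are hex-encoded scripthashes):
+    ///
+    /// ```ignore
+    /// let histories = client
+    ///     .scripthash_get_history_batch(vec![hash_a, hash_b])
+    ///     .compat()
+    ///     .await?;
+    /// assert_eq!(histories.len(), 2); // responses are returned in request order
+    /// ```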
+ pub fn scripthash_get_history_batch(&self, hashes: I) -> RpcRes> + where + I: IntoIterator, + { + let requests = hashes + .into_iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.get_history", hash)); + self.batch_rpc(requests) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory + pub fn scripthash_get_balance(&self, hash: &str) -> RpcRes { + let arc = self.clone(); + let hash = hash.to_owned(); + let fut = async move { + let request = rpc_func!(arc, "blockchain.scripthash.get_balance", &hash); + arc.get_balance_concurrent_map.wrap_request(hash, request).await + }; + Box::new(fut.boxed().compat()) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory + /// Requests balances in a batch and returns them in the same order they were requested. + pub fn scripthash_get_balances(&self, hashes: I) -> RpcRes> + where + I: IntoIterator, + { + let requests = hashes + .into_iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.get_balance", &hash)); + self.batch_rpc(requests) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe + pub fn blockchain_headers_subscribe(&self) -> RpcRes { + rpc_func!(self, BLOCKCHAIN_HEADERS_SUB_ID) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-broadcast + pub fn blockchain_transaction_broadcast(&self, tx: BytesJson) -> RpcRes { + rpc_func!(self, "blockchain.transaction.broadcast", tx) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-estimatefee + /// It is recommended to set n_blocks as low as possible. + /// However, in some cases, n_blocks = 1 leads to an unreasonably high fee estimation. + /// https://github.com/KomodoPlatform/atomicDEX-API/issues/656#issuecomment-743759659 + pub fn estimate_fee(&self, mode: &Option, n_blocks: u32) -> UtxoRpcFut { + match mode { + Some(m) => { + Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks, m).map_to_mm_fut(UtxoRpcError::from)) + }, + None => Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks).map_to_mm_fut(UtxoRpcError::from)), + } + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-header + pub fn blockchain_block_header(&self, height: u64) -> RpcRes { + rpc_func!(self, "blockchain.block.header", height) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers + pub fn blockchain_block_headers(&self, start_height: u64, count: NonZeroU64) -> RpcRes { + rpc_func!(self, "blockchain.block.headers", start_height, count) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get-merkle + pub fn blockchain_transaction_get_merkle(&self, txid: H256Json, height: u64) -> RpcRes { + rpc_func!(self, "blockchain.transaction.get_merkle", txid, height) + } + + // get_tx_height_from_rpc is costly since it loops through history after requesting the whole history of the script pubkey + // This method should always be used if the block headers are saved to the DB + async fn get_tx_height_from_storage(&self, tx: &UtxoTx) -> Result> { + let tx_hash = tx.hash().reversed(); + let blockhash = self.get_verbose_transaction(&tx_hash.into()).compat().await?.blockhash; + Ok(self + .block_headers_storage() + .get_block_height_by_hash(blockhash.into()) + .await? 
+ .ok_or_else(|| { + GetTxHeightError::HeightNotFound(format!( + "Transaction block header is not found in storage for {}", + self.coin_ticker() + )) + })? + .try_into()?) + } + + // get_tx_height_from_storage is always preferred to be used instead of this, but if there is no headers in storage (storing headers is not enabled) + // this function can be used instead + async fn get_tx_height_from_rpc(&self, tx: &UtxoTx) -> Result { + let selfi = self; + for output in tx.outputs.clone() { + let script_pubkey_str = hex::encode(electrum_script_hash(&output.script_pubkey)); + if let Ok(history) = selfi.scripthash_get_history(script_pubkey_str.as_str()).compat().await { + if let Some(item) = history + .into_iter() + .find(|item| item.tx_hash.reversed() == H256Json(*tx.hash()) && item.height > 0) + { + return Ok(item.height as u64); + } + } + } + Err(GetTxHeightError::HeightNotFound(format!( + "Couldn't find height through electrum for {}", + selfi.coin_ticker + ))) + } + + async fn block_header_from_storage(&self, height: u64) -> Result> { + self.block_headers_storage() + .get_block_header(height) + .await? + .ok_or_else(|| { + GetBlockHeaderError::Internal(format!("Header not found in storage for {}", self.coin_ticker)).into() + }) + } + + async fn block_header_from_storage_or_rpc(&self, height: u64) -> Result> { + match self.block_header_from_storage(height).await { + Ok(h) => Ok(h), + Err(_) => Ok(deserialize( + self.blockchain_block_header(height).compat().await?.as_slice(), + )?), + } + } + + pub async fn get_confirmed_tx_info_from_rpc( + &self, + tx: &UtxoTx, + ) -> Result { + let height = self.get_tx_height_from_rpc(tx).await?; + + let merkle_branch = self + .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) + .compat() + .await?; + + let header = deserialize(self.blockchain_block_header(height).compat().await?.as_slice())?; + + Ok(ConfirmedTransactionInfo { + tx: tx.clone(), + header, + index: merkle_branch.pos as u64, + height, + }) + } + + pub async fn get_merkle_and_validated_header( + &self, + tx: &UtxoTx, + ) -> Result<(TxMerkleBranch, BlockHeader, u64), MmError> { + let height = self.get_tx_height_from_storage(tx).await?; + + let merkle_branch = self + .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) + .compat() + .await + .map_to_mm(|err| SPVError::UnableToGetMerkle { + coin: self.coin_ticker.clone(), + err: err.to_string(), + })?; + + let header = self.block_header_from_storage(height).await?; + + Ok((merkle_branch, header, height)) + } + + pub fn retrieve_headers_from( + &self, + server_address: &str, + from_height: u64, + to_height: u64, + ) -> UtxoRpcFut<(HashMap, Vec)> { + let coin_name = self.coin_ticker.clone(); + if from_height == 0 || to_height < from_height { + return Box::new(futures01::future::err( + UtxoRpcError::Internal("Invalid values for from/to parameters".to_string()).into(), + )); + } + let count: NonZeroU64 = match (to_height - from_height + 1).try_into() { + Ok(c) => c, + Err(e) => return Box::new(futures01::future::err(UtxoRpcError::Internal(e.to_string()).into())), + }; + Box::new( + self.get_block_headers_from(server_address, from_height, count) + .map_to_mm_fut(UtxoRpcError::from) + .and_then(move |headers| { + let (block_registry, block_headers) = { + if headers.count == 0 { + return MmError::err(UtxoRpcError::Internal("No headers available".to_string())); + } + let len = CompactInteger::from(headers.count); + let mut serialized = serialize(&len).take(); + serialized.extend(headers.hex.0.into_iter()); + 
drop_mutability!(serialized); + let mut reader = + Reader::new_with_coin_variant(serialized.as_slice(), coin_name.as_str().into()); + let maybe_block_headers = reader.read_list::(); + let block_headers = match maybe_block_headers { + Ok(headers) => headers, + Err(e) => return MmError::err(UtxoRpcError::InvalidResponse(format!("{:?}", e))), + }; + let mut block_registry: HashMap = HashMap::new(); + let mut starting_height = from_height; + for block_header in &block_headers { + block_registry.insert(starting_height, block_header.clone()); + starting_height += 1; + } + (block_registry, block_headers) + }; + Ok((block_registry, block_headers)) + }), + ) + } + + pub(crate) fn get_servers_with_latest_block_count(&self) -> UtxoRpcFut<(Vec, u64)> { + let selfi = self.clone(); + let fut = async move { + let addresses = selfi.connection_manager.get_all_server_addresses(); + let futures = addresses + .into_iter() + .map(|address| { + selfi + .get_block_count_from(&address) + .map(|response| (address, response)) + .compat() + }) + .collect::>(); + + let responses = join_all(futures).await; + + // First, we use filter_map to get rid of any errors and collect the + // server addresses and block counts into two vectors + let (responding_servers, block_counts_from_all_servers): (Vec<_>, Vec<_>) = + responses.clone().into_iter().filter_map(|res| res.ok()).unzip(); + + // Next, we use max to find the maximum block count from all servers + if let Some(max_block_count) = block_counts_from_all_servers.clone().iter().max() { + // Then, we use filter and collect to get the servers that have the maximum block count + let servers_with_max_count: Vec<_> = responding_servers + .into_iter() + .zip(block_counts_from_all_servers) + .filter(|(_, count)| count == max_block_count) + .map(|(addr, _)| addr) + .collect(); + + // Finally, we return a tuple of servers with max count and the max count + return Ok((servers_with_max_count, *max_block_count)); + } + + Err(MmError::new(UtxoRpcError::Internal(format!( + "Couldn't get block count from any server for {}, responses: {:?}", + &selfi.coin_ticker, responses + )))) + }; + + Box::new(fut.boxed().compat()) + } +} + +// if mockable is placed before async_trait there is `munmap_chunk(): invalid pointer` error on async fn mocking attempt +#[async_trait] +#[cfg_attr(test, mockable)] +impl UtxoRpcClientOps for ElectrumClient { + fn list_unspent(&self, address: &Address, _decimals: u8) -> UtxoRpcFut> { + let mut output_scripts = vec![try_f!(output_script(address))]; + + // If the plain pubkey is available, fetch the UTXOs found in P2PK outputs as well (if any). + if let Some(pubkey) = address.pubkey() { + let p2pk_output_script = output_script_p2pk(pubkey); + output_scripts.push(p2pk_output_script); + } + + let this = self.clone(); + let fut = async move { + let hashes = output_scripts + .iter() + .map(|s| hex::encode(electrum_script_hash(s))) + .collect(); + let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; + + let unspents = unspents + .into_iter() + .zip(output_scripts) + .flat_map(|(unspents, output_script)| { + unspents + .into_iter() + .map(move |unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) + }) + .collect(); + Ok(unspents) + }; + + Box::new(fut.boxed().compat()) + } + + fn list_unspent_group(&self, addresses: Vec
, _decimals: u8) -> UtxoRpcFut { + let output_scripts = try_f!(addresses + .iter() + .map(output_script) + .collect::, keys::Error>>()); + + let this = self.clone(); + let fut = async move { + let hashes = output_scripts + .iter() + .map(|s| hex::encode(electrum_script_hash(s))) + .collect(); + let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; + + let unspents: Vec> = unspents + .into_iter() + .zip(output_scripts) + .map(|(unspents, output_script)| { + unspents + .into_iter() + .map(|unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) + .collect() + }) + .collect(); + + let unspent_map = addresses + .into_iter() + // `scripthash_list_unspent_batch` returns `ScriptHashUnspents` elements in the same order in which they were requested. + // So we can zip `addresses` and `unspents` into one iterator. + .zip(unspents) + .collect(); + Ok(unspent_map) + }; + Box::new(fut.boxed().compat()) + } + + fn send_transaction(&self, tx: &UtxoTx) -> UtxoRpcFut { + let bytes = if tx.has_witness() { + BytesJson::from(serialize_with_flags(tx, SERIALIZE_TRANSACTION_WITNESS)) + } else { + BytesJson::from(serialize(tx)) + }; + Box::new( + self.blockchain_transaction_broadcast(bytes) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut { + Box::new( + self.blockchain_transaction_broadcast(tx) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn blockchain_scripthash_subscribe_using(&self, server_address: &str, scripthash: String) -> UtxoRpcFut { + Box::new( + rpc_func_from!(self, server_address, BLOCKCHAIN_SCRIPTHASH_SUB_ID, scripthash) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// returns transaction bytes by default + fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut { + let verbose = false; + Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// returns verbose transaction by default + fn get_verbose_transaction(&self, txid: &H256Json) -> UtxoRpcFut { + let verbose = true; + Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// Returns verbose transactions in a batch. + fn get_verbose_transactions(&self, tx_ids: &[H256Json]) -> UtxoRpcFut> { + let verbose = true; + let requests = tx_ids + .iter() + .map(|txid| rpc_req!(self, "blockchain.transaction.get", txid, verbose)); + Box::new(self.batch_rpc(requests).map_to_mm_fut(UtxoRpcError::from)) + } + + fn get_block_count(&self) -> UtxoRpcFut { + Box::new( + self.blockchain_headers_subscribe() + .map(|r| r.block_height()) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn display_balance(&self, address: Address, decimals: u8) -> RpcRes { + let output_script = try_f!(output_script(&address).map_err(|err| JsonRpcError::new( + UtxoJsonRpcClientInfo::client_info(self), + rpc_req!(self, "blockchain.scripthash.get_balance").into(), + JsonRpcErrorType::Internal(err.to_string()) + ))); + let mut hashes = vec![hex::encode(electrum_script_hash(&output_script))]; + + // If the plain pubkey is available, fetch the balance found in P2PK output as well (if any). 
+        if let Some(pubkey) = address.pubkey() {
+            let p2pk_output_script = output_script_p2pk(pubkey);
+            hashes.push(hex::encode(electrum_script_hash(&p2pk_output_script)));
+        }
+
+        let this = self.clone();
+        let fut = async move {
+            Ok(this
+                .scripthash_get_balances(hashes)
+                .compat()
+                .await?
+                .into_iter()
+                .fold(BigDecimal::from(0), |sum, electrum_balance| {
+                    sum + electrum_balance.to_big_decimal(decimals)
+                }))
+        };
+        Box::new(fut.boxed().compat())
+    }
+
+    fn display_balances(&self, addresses: Vec<Address>
, decimals: u8) -> UtxoRpcFut> { + let this = self.clone(); + let fut = async move { + let hashes = addresses + .iter() + .map(|address| { + let output_script = output_script(address)?; + let hash = electrum_script_hash(&output_script); + + Ok(hex::encode(hash)) + }) + .collect::, keys::Error>>()?; + + let electrum_balances = this.scripthash_get_balances(hashes).compat().await?; + let balances = electrum_balances + .into_iter() + // `scripthash_get_balances` returns `ElectrumBalance` elements in the same order in which they were requested. + // So we can zip `addresses` and the balances into one iterator. + .zip(addresses) + .map(|(electrum_balance, address)| (address, electrum_balance.to_big_decimal(decimals))) + .collect(); + Ok(balances) + }; + + Box::new(fut.boxed().compat()) + } + + fn estimate_fee_sat( + &self, + decimals: u8, + _fee_method: &EstimateFeeMethod, + mode: &Option, + n_blocks: u32, + ) -> UtxoRpcFut { + Box::new(self.estimate_fee(mode, n_blocks).map(move |fee| { + if fee > 0.00001 { + (fee * 10.0_f64.powf(decimals as f64)) as u64 + } else { + 1000 + } + })) + } + + fn get_relay_fee(&self) -> RpcRes { rpc_func!(self, "blockchain.relayfee") } + + fn find_output_spend( + &self, + tx_hash: H256, + script_pubkey: &[u8], + vout: usize, + _from_block: BlockHashOrHeight, + tx_hash_algo: TxHashAlgo, + ) -> Box, Error = String> + Send> { + let selfi = self.clone(); + let script_hash = hex::encode(electrum_script_hash(script_pubkey)); + let fut = async move { + let history = try_s!(selfi.scripthash_get_history(&script_hash).compat().await); + + if history.len() < 2 { + return Ok(None); + } + + for item in history.iter() { + let transaction = try_s!(selfi.get_transaction_bytes(&item.tx_hash).compat().await); + + let mut maybe_spend_tx: UtxoTx = + try_s!(deserialize(transaction.as_slice()).map_err(|e| ERRL!("{:?}", e))); + maybe_spend_tx.tx_hash_algo = tx_hash_algo; + drop_mutability!(maybe_spend_tx); + + for (index, input) in maybe_spend_tx.inputs.iter().enumerate() { + if input.previous_output.hash == tx_hash && input.previous_output.index == vout as u32 { + return Ok(Some(SpentOutputInfo { + input: input.clone(), + input_index: index, + spending_tx: maybe_spend_tx, + spent_in_block: BlockHashOrHeight::Height(item.height), + })); + } + } + } + Ok(None) + }; + Box::new(fut.boxed().compat()) + } + + fn get_median_time_past( + &self, + starting_block: u64, + count: NonZeroU64, + coin_variant: CoinVariant, + ) -> UtxoRpcFut { + let from = if starting_block <= count.get() { + 0 + } else { + starting_block - count.get() + 1 + }; + Box::new( + self.blockchain_block_headers(from, count) + .map_to_mm_fut(UtxoRpcError::from) + .and_then(|res| { + if res.count == 0 { + return MmError::err(UtxoRpcError::InvalidResponse("Server returned zero count".to_owned())); + } + let len = CompactInteger::from(res.count); + let mut serialized = serialize(&len).take(); + serialized.extend(res.hex.0.into_iter()); + let mut reader = Reader::new_with_coin_variant(serialized.as_slice(), coin_variant); + let headers = reader.read_list::()?; + let mut timestamps: Vec<_> = headers.into_iter().map(|block| block.time).collect(); + // can unwrap because count is non zero + Ok(median(timestamps.as_mut_slice()).unwrap()) + }), + ) + } + + async fn get_block_timestamp(&self, height: u64) -> Result> { + Ok(self.block_header_from_storage_or_rpc(height).await?.time as u64) + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs new file mode 
100644 index 0000000000..2b9a3ada48 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs @@ -0,0 +1,730 @@ +use super::client::ElectrumClient; +use super::constants::{BLOCKCHAIN_HEADERS_SUB_ID, BLOCKCHAIN_SCRIPTHASH_SUB_ID, CUTOFF_TIMEOUT, + DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT}; + +use crate::{RpcTransportEventHandler, SharableRpcTransportEventHandler}; +use common::custom_futures::timeout::FutureTimerExt; +use common::executor::{abortable_queue::AbortableQueue, abortable_queue::WeakSpawner, AbortableSystem, SpawnFuture, + Timer}; +use common::expirable_map::ExpirableMap; +use common::jsonrpc_client::{JsonRpcBatchResponse, JsonRpcErrorType, JsonRpcId, JsonRpcRequest, JsonRpcResponse, + JsonRpcResponseEnum}; +use common::log::{error, info}; +use common::{now_float, now_ms}; +use mm2_rpc::data::legacy::ElectrumProtocol; + +use std::io; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use futures::channel::oneshot as async_oneshot; +use futures::compat::{Future01CompatExt, Stream01CompatExt}; +use futures::future::FutureExt; +use futures::lock::Mutex as AsyncMutex; +use futures::select; +use futures::stream::StreamExt; +use futures01::sync::mpsc; +use futures01::{Sink, Stream}; +use http::Uri; +use instant::Instant; +use serde::Serialize; + +cfg_native! { + use super::tcp_stream::*; + + use std::convert::TryFrom; + use std::net::ToSocketAddrs; + use futures::future::{Either, TryFutureExt}; + use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader, WriteHalf, ReadHalf}; + use tokio::net::TcpStream; + use tokio_rustls::{TlsConnector}; + use rustls::{ServerName}; +} + +cfg_wasm32! { + use mm2_net::wasm::wasm_ws::{ws_transport,WsOutgoingSender,WsIncomingReceiver}; + + use std::sync::atomic::AtomicUsize; +} + +pub type JsonRpcPendingRequests = ExpirableMap>; + +macro_rules! disconnect_and_return { + ($typ:tt, $err:expr, $conn:expr, $handlers:expr) => {{ + let err = ElectrumConnectionErr::$typ(format!("{:?}", $err)); + disconnect_and_return!(err, $conn, $handlers); + }}; + ($err:expr, $conn:expr, $handlers:expr) => {{ + // Inform the event handlers of the disconnection. + $handlers.on_disconnected(&$conn.address()).ok(); + // Disconnect the connection. + $conn.disconnect(Some($err.clone())); + return Err($err); + }}; +} + +macro_rules! disconnect_and_return_if_err { + ($ex:expr, $typ:tt, $conn:expr, $handlers:expr) => {{ + match $ex { + Ok(res) => res, + Err(e) => { + disconnect_and_return!($typ, e, $conn, $handlers); + }, + } + }}; + ($ex:expr, $conn:expr, $handlers:expr) => {{ + match $ex { + Ok(res) => res, + Err(e) => { + disconnect_and_return!(e, $conn, $handlers); + }, + } + }}; +} + +macro_rules! wrap_timeout { + ($call:expr, $timeout:expr, $conn:expr, $handlers:expr) => {{ + let now = Instant::now(); + let res = match $call.timeout_secs($timeout).await { + Ok(res) => res, + Err(_) => { + disconnect_and_return!( + ElectrumConnectionErr::Timeout(stringify!($call), $timeout), + $conn, + $handlers + ); + }, + }; + // Remaining timeout after executing `$call`. + let timeout = ($timeout - now.elapsed().as_secs_f64()).max(0.0); + (timeout, res) + }}; +} + +/// Helper function casting mpsc::Receiver as Stream. +fn rx_to_stream(rx: mpsc::Receiver>) -> impl Stream, Error = io::Error> { + rx.map_err(|_| panic!("errors not possible on rx")) +} + +#[cfg(not(target_arch = "wasm32"))] +/// Helper function to parse a a string DNS name into a ServerName. 
+fn server_name_from_domain(dns_name: &str) -> Result { + match ServerName::try_from(dns_name) { + // The `ServerName` must be `DnsName` variant, SSL works with domain names and not IPs. + Ok(dns_name) if matches!(dns_name, ServerName::DnsName(_)) => Ok(dns_name), + _ => ERR!("Couldn't parse DNS name from '{}'", dns_name), + } +} + +/// Electrum request RPC representation +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ElectrumConnectionSettings { + pub url: String, + #[serde(default)] + pub protocol: ElectrumProtocol, + #[serde(default)] + pub disable_cert_verification: bool, + pub timeout_sec: Option, +} + +/// Possible connection errors when connection to an Electrum server. +#[derive(Clone, Debug)] +pub enum ElectrumConnectionErr { + /// Couldn't connect to the server within the provided timeout. + /// The first argument is the call (stringified) that timed out. + /// The second argument is the time limit it had to finish within, in seconds. + Timeout(&'static str, f64), + /// A temporary error that might be resolved later on. + Temporary(String), + /// An error that can't be resolved by retrying. + Irrecoverable(String), + /// The server's version doesn't match the client's version. + VersionMismatch(String), +} + +impl ElectrumConnectionErr { + pub fn is_recoverable(&self) -> bool { + match self { + ElectrumConnectionErr::Irrecoverable(_) | ElectrumConnectionErr::VersionMismatch(_) => false, + ElectrumConnectionErr::Timeout(_, _) | ElectrumConnectionErr::Temporary(_) => true, + } + } +} + +/// Represents the active Electrum connection to selected address +#[derive(Debug)] +pub struct ElectrumConnection { + /// The client connected to this SocketAddr + settings: ElectrumConnectionSettings, + /// The Sender forwarding requests to writing part of underlying stream + tx: Mutex>>>, + /// A lock to prevent multiple connection establishments happening concurrently. + establishing_connection: AsyncMutex<()>, + /// Responses are stored here + responses: Mutex, + /// Selected protocol version. The value is initialized after the server.version RPC call. + protocol_version: Mutex>, + /// Why was the connection disconnected the last time? + last_error: Mutex>, + /// An abortable system for connection specific tasks to run on. 
+ abortable_system: AbortableQueue, +} + +impl ElectrumConnection { + pub fn new(settings: ElectrumConnectionSettings, abortable_system: AbortableQueue) -> Self { + ElectrumConnection { + settings, + tx: Mutex::new(None), + establishing_connection: AsyncMutex::new(()), + responses: Mutex::new(JsonRpcPendingRequests::new()), + protocol_version: Mutex::new(None), + last_error: Mutex::new(None), + abortable_system, + } + } + + pub fn address(&self) -> &str { &self.settings.url } + + fn weak_spawner(&self) -> WeakSpawner { self.abortable_system.weak_spawner() } + + fn is_connected(&self) -> bool { self.tx.lock().unwrap().is_some() } + + fn set_protocol_version(&self, version: f32) { + let mut protocol_version = self.protocol_version.lock().unwrap(); + if protocol_version.is_none() { + *protocol_version = Some(version); + } + } + + fn clear_protocol_version(&self) { self.protocol_version.lock().unwrap().take(); } + + fn set_last_error(&self, reason: ElectrumConnectionErr) { + let mut last_error = self.last_error.lock().unwrap(); + if last_error.is_none() { + *last_error = Some(reason); + } + } + + fn clear_last_error(&self) { self.last_error.lock().unwrap().take(); } + + fn last_error(&self) -> Option { self.last_error.lock().unwrap().clone() } + + /// Connects to the electrum server by setting the `tx` sender channel. + /// + /// # Safety: + /// For this to be atomic, the caller must have acquired the lock to `establishing_connection`. + fn connect(&self, tx: mpsc::Sender>) { + self.tx.lock().unwrap().replace(tx); + self.clear_last_error(); + } + + /// Disconnect and clear the connection state. + pub fn disconnect(&self, reason: Option) { + self.tx.lock().unwrap().take(); + self.responses.lock().unwrap().clear(); + self.clear_protocol_version(); + if let Some(reason) = reason { + self.set_last_error(reason); + } + self.abortable_system.abort_all_and_reset().ok(); + } + + /// Sends a request to the electrum server and waits for the response. + /// + /// ## Important: This should always return [`JsonRpcErrorType::Transport`] error. + pub async fn electrum_request( + &self, + mut req_json: String, + rpc_id: JsonRpcId, + timeout: f64, + ) -> Result { + #[cfg(not(target_arch = "wasm"))] + { + // Electrum request and responses must end with \n + // https://electrumx.readthedocs.io/en/latest/protocol-basics.html#message-stream + req_json.push('\n'); + } + + // Create a oneshot channel to receive the response in. + let (req_tx, res_rx) = async_oneshot::channel(); + self.responses + .lock() + .unwrap() + .insert(rpc_id, req_tx, Duration::from_secs_f64(timeout)); + let tx = self + .tx + .lock() + .unwrap() + // Clone to not to hold the lock while sending the request. + .clone() + .ok_or_else(|| JsonRpcErrorType::Transport("Connection is not established".to_string()))?; + + // Send the request to the electrum server. + tx.send(req_json.into_bytes()) + .compat() + .await + .map_err(|e| JsonRpcErrorType::Transport(e.to_string()))?; + + // Wait for the response to be processed and sent back to us. + res_rx + .timeout_secs(timeout) + .await + .map_err(|e| JsonRpcErrorType::Transport(e.to_string()))? + .map_err(|_e| JsonRpcErrorType::Transport("The sender didn't send".to_string())) + } + + /// Process an incoming JSONRPC response from the electrum server. + fn process_electrum_response(&self, bytes: &[u8], event_handlers: &Vec>) { + // Inform the event handlers. 
+ event_handlers.on_incoming_response(bytes); + + // detect if we got standard JSONRPC response or subscription response as JSONRPC request + #[derive(Deserialize)] + #[serde(untagged)] + enum ElectrumRpcResponseEnum { + /// The subscription response as JSONRPC request. + /// + /// NOTE Because JsonRpcResponse uses default values for each of its field, + /// this variant has to stay at top in this enumeration to be properly deserialized + /// from serde. + SubscriptionNotification(JsonRpcRequest), + /// The standard JSONRPC single response. + SingleResponse(JsonRpcResponse), + /// The batch of standard JSONRPC responses. + BatchResponses(JsonRpcBatchResponse), + } + + let response: ElectrumRpcResponseEnum = match serde_json::from_slice(bytes) { + Ok(res) => res, + Err(e) => { + error!("{}", e); + return; + }, + }; + + let response = match response { + ElectrumRpcResponseEnum::SingleResponse(single) => JsonRpcResponseEnum::Single(single), + ElectrumRpcResponseEnum::BatchResponses(batch) => JsonRpcResponseEnum::Batch(batch), + ElectrumRpcResponseEnum::SubscriptionNotification(req) => { + match req.method.as_str() { + // NOTE: Sending a script hash notification is handled in it's own event handler. + BLOCKCHAIN_SCRIPTHASH_SUB_ID | BLOCKCHAIN_HEADERS_SUB_ID => {}, + _ => { + error!("Unexpected notification method: {}", req.method); + }, + } + return; + }, + }; + + // the corresponding sender may not exist, receiver may be dropped + // these situations are not considered as errors so we just silently skip them + let pending = self.responses.lock().unwrap().remove(&response.rpc_id()); + if let Some(tx) = pending { + tx.send(response).ok(); + } + } + + /// Process a bulk response from the electrum server. + /// + /// A bulk response is a response that contains multiple JSONRPC responses. + fn process_electrum_bulk_response( + &self, + bulk_response: &[u8], + event_handlers: &Vec>, + ) { + // We should split the received response because we can get several responses in bulk. + let responses = bulk_response.split(|item| *item == b'\n'); + + for response in responses { + // `split` returns empty slice if it ends with separator which is our case. + if !response.is_empty() { + self.process_electrum_response(response, event_handlers) + } + } + } +} + +// Connection loop establishment methods. +impl ElectrumConnection { + /// Tries to establish a connection to the server. + /// + /// Returns the tokio stream with the server and the remaining timeout + /// left from the input timeout. 
+ #[cfg(not(target_arch = "wasm32"))] + async fn establish_connection(connection: &ElectrumConnection) -> Result { + let address = connection.address(); + + let socket_addr = match address.to_socket_addrs() { + Err(e) if matches!(e.kind(), std::io::ErrorKind::InvalidInput) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!( + "Invalid address format: {e:?}" + ))); + }, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Resolve error in address: {e:?}" + ))); + }, + Ok(mut addr) => match addr.next() { + None => { + return Err(ElectrumConnectionErr::Temporary("Address resolved to None".to_string())); + }, + Some(addr) => addr, + }, + }; + + let connect_f = match connection.settings.protocol { + ElectrumProtocol::TCP => Either::Left(TcpStream::connect(&socket_addr).map_ok(ElectrumStream::Tcp)), + ElectrumProtocol::SSL => { + let uri: Uri = match address.parse() { + Ok(uri) => uri, + Err(e) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!("URL parse error: {e:?}"))); + }, + }; + + let Some(dns_name) = uri.host().map(String::from) else { + return Err(ElectrumConnectionErr::Irrecoverable("Couldn't retrieve host from address".to_string())); + }; + + let Ok(dns) = server_name_from_domain(dns_name.as_str()) else { + return Err(ElectrumConnectionErr::Irrecoverable("Address isn't a valid domain name".to_string())); + }; + + let tls_connector = if connection.settings.disable_cert_verification { + TlsConnector::from(UNSAFE_TLS_CONFIG.clone()) + } else { + TlsConnector::from(SAFE_TLS_CONFIG.clone()) + }; + + Either::Right( + TcpStream::connect(&socket_addr) + .and_then(move |stream| tls_connector.connect(dns, stream).map_ok(ElectrumStream::Tls)), + ) + }, + ElectrumProtocol::WS | ElectrumProtocol::WSS => { + return Err(ElectrumConnectionErr::Irrecoverable( + "Incorrect protocol for native connection ('WS'/'WSS'). Use 'TCP' or 'SSL' instead.".to_string(), + )); + }, + }; + + // Try to connect to the server. + let stream = match connect_f.await { + Ok(stream) => stream, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Couldn't connect to the electrum server: {e:?}" + ))) + }, + }; + if let Err(e) = stream.as_ref().set_nodelay(true) { + return Err(ElectrumConnectionErr::Temporary(format!( + "Setting TCP_NODELAY failed: {e:?}" + ))); + }; + + Ok(stream) + } + + #[cfg(target_arch = "wasm32")] + async fn establish_connection( + connection: &ElectrumConnection, + ) -> Result<(WsIncomingReceiver, WsOutgoingSender), ElectrumConnectionErr> { + lazy_static! { + static ref CONN_IDX: Arc = Arc::new(AtomicUsize::new(0)); + } + + let address = connection.address(); + let uri: Uri = match address.parse() { + Ok(uri) => uri, + Err(e) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!( + "Failed to parse the address: {e:?}" + ))); + }, + }; + if uri.scheme().is_some() { + return Err(ElectrumConnectionErr::Irrecoverable( + "There has not to be a scheme in the url. 'ws://' scheme is used by default. Consider using 'protocol: \"WSS\"' in the electrum request to switch to the 'wss://' scheme.".to_string(), + ) + ); + } + + let protocol_prefixed_address = match connection.settings.protocol { + ElectrumProtocol::WS => { + format!("ws://{address}") + }, + ElectrumProtocol::WSS => { + format!("wss://{address}") + }, + ElectrumProtocol::TCP | ElectrumProtocol::SSL => { + return Err(ElectrumConnectionErr::Irrecoverable( + "'TCP' and 'SSL' are not supported in a browser. 
Please use 'WS' or 'WSS' protocols".to_string(), + )); + }, + }; + + let spawner = connection.weak_spawner(); + let connect_f = ws_transport( + CONN_IDX.fetch_add(1, AtomicOrdering::Relaxed), + &protocol_prefixed_address, + &spawner, + ); + + // Try to connect to the server. + let (transport_tx, transport_rx) = match connect_f.await { + Ok(stream) => stream, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Couldn't connect to the electrum server: {e:?}" + ))) + }, + }; + + Ok((transport_rx, transport_tx)) + } + + /// Waits until `last_response` time is too old in the past then returns a temporary error. + async fn timeout_loop(last_response: Arc) -> ElectrumConnectionErr { + loop { + Timer::sleep(CUTOFF_TIMEOUT).await; + let last_sec = (last_response.load(AtomicOrdering::Relaxed) / 1000) as f64; + if now_float() - last_sec > CUTOFF_TIMEOUT { + break ElectrumConnectionErr::Temporary(format!( + "Server didn't respond for too long ({}s).", + now_float() - last_sec + )); + } + } + } + + /// Runs the send loop that sends outgoing requests to the server. + /// + /// This runs until the sender is disconnected. + async fn send_loop( + address: String, + event_handlers: Arc>>, + #[cfg(not(target_arch = "wasm32"))] mut write: WriteHalf, + #[cfg(target_arch = "wasm32")] mut write: WsOutgoingSender, + rx: mpsc::Receiver>, + ) -> ElectrumConnectionErr { + let mut rx = rx_to_stream(rx).compat(); + while let Some(Ok(bytes)) = rx.next().await { + // NOTE: We shouldn't really notify on going request yet since we don't know + // if sending will error. We do that early though to avoid cloning the bytes on wasm. + event_handlers.on_outgoing_request(&bytes); + + #[cfg(not(target_arch = "wasm32"))] + let send_result = write.write_all(&bytes).await; + #[cfg(target_arch = "wasm32")] + let send_result = write.send(bytes).await; + + if let Err(e) = send_result { + error!("Write error {e} to {address}"); + } + } + ElectrumConnectionErr::Temporary("Sender disconnected".to_string()) + } + + /// Runs the receive loop that reads incoming responses from the server. + /// + /// This runs until the electrum server sends an empty response (signaling disconnection), + /// or if we encounter an error while reading from the stream. 
+ #[cfg(not(target_arch = "wasm32"))] + async fn recv_loop( + connection: Arc, + event_handlers: Arc>>, + read: ReadHalf, + last_response: Arc, + ) -> ElectrumConnectionErr { + let mut buffer = String::with_capacity(1024); + let mut buf_reader = BufReader::new(read); + loop { + match buf_reader.read_line(&mut buffer).await { + Ok(c) => { + if c == 0 { + break ElectrumConnectionErr::Temporary("EOF".to_string()); + } + }, + Err(e) => { + break ElectrumConnectionErr::Temporary(format!("Error on read {e:?}")); + }, + }; + + last_response.store(now_ms(), AtomicOrdering::Relaxed); + connection.process_electrum_bulk_response(buffer.as_bytes(), &event_handlers); + buffer.clear(); + } + } + + #[cfg(target_arch = "wasm32")] + async fn recv_loop( + connection: Arc, + event_handlers: Arc>>, + mut read: WsIncomingReceiver, + last_response: Arc, + ) -> ElectrumConnectionErr { + let address = connection.address(); + while let Some(response) = read.next().await { + match response { + Ok(bytes) => { + last_response.store(now_ms(), AtomicOrdering::Relaxed); + connection.process_electrum_response(&bytes, &event_handlers); + }, + Err(e) => { + error!("{address} error: {e:?}"); + }, + } + } + ElectrumConnectionErr::Temporary("Receiver disconnected".to_string()) + } + + /// Checks the server version against the range of accepted versions and disconnects the server + /// if the version is not supported. + async fn check_server_version( + connection: &ElectrumConnection, + client: &ElectrumClient, + ) -> Result<(), ElectrumConnectionErr> { + let address = connection.address(); + + // Don't query for the version if the client doesn't care about it, as querying for the version might + // fail with the protocol range we will provide. + if !client.negotiate_version() { + return Ok(()); + } + + match client.server_version(address, client.protocol_version()).compat().await { + Ok(version_str) => match version_str.protocol_version.parse::() { + Ok(version_f32) => { + connection.set_protocol_version(version_f32); + Ok(()) + }, + Err(e) => Err(ElectrumConnectionErr::Temporary(format!( + "Failed to parse electrum server version {e:?}" + ))), + }, + // If the version we provided isn't supported by the server, it returns a JSONRPC response error. + Err(e) if matches!(e.error, JsonRpcErrorType::Response(..)) => { + Err(ElectrumConnectionErr::VersionMismatch(format!("{e:?}"))) + }, + Err(e) => Err(ElectrumConnectionErr::Temporary(format!( + "Failed to get electrum server version {e:?}" + ))), + } + } + + /// Starts the connection loop that keeps an active connection to the electrum server. + /// If this connection is already connected, nothing is performed and `Ok(())` is returned. + /// + /// This will first try to connect to the server and use that connection to query its version. + /// If version checks succeed, the connection will be kept alive, otherwise, it will be terminated. + pub async fn establish_connection_loop( + self: &Arc, + client: ElectrumClient, + ) -> Result<(), ElectrumConnectionErr> { + let connection = self.clone(); + let address = connection.address().to_string(); + let event_handlers = client.event_handlers(); + // This is the timeout for connection establishment and version querying (i.e. the whole method). + // The caller is guaranteed that the method will return within this time. + let timeout = connection + .settings + .timeout_sec + .unwrap_or(DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT); + + // Locking `establishing_connection` will prevent other threads from establishing a connection concurrently. 
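        // Note that `wrap_timeout!` (defined earlier in this file) charges every awaited step against one shared
        // budget: it awaits the call with `timeout_secs`, then yields the remaining budget
        // `($timeout - elapsed).max(0.0)` together with the result. Re-binding `timeout` after each step below is
        // what keeps the whole method within the configured `timeout_sec` (or the default establishment timeout).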
+ let (timeout, _establishing_connection) = wrap_timeout!( + connection.establishing_connection.lock(), + timeout, + connection, + event_handlers + ); + + // Check if we are already connected. + if connection.is_connected() { + return Ok(()); + } + + // Check why we errored the last time, don't try to reconnect if it was an irrecoverable error. + if let Some(last_error) = connection.last_error() { + if !last_error.is_recoverable() { + return Err(last_error); + } + } + + let (timeout, stream_res) = wrap_timeout!( + Self::establish_connection(&connection).boxed(), + timeout, + connection, + event_handlers + ); + let stream = disconnect_and_return_if_err!(stream_res, connection, event_handlers); + + let (connection_ready_signal, wait_for_connection_ready) = async_oneshot::channel(); + let connection_loop = { + // Branch 1: Disconnect after not receiving responses for too long. + let last_response = Arc::new(AtomicU64::new(now_ms())); + let timeout_branch = Self::timeout_loop(last_response.clone()).boxed(); + + // Branch 2: Read incoming responses from the server. + #[cfg(not(target_arch = "wasm32"))] + let (read, write) = tokio::io::split(stream); + #[cfg(target_arch = "wasm32")] + let (read, write) = stream; + let recv_branch = Self::recv_loop(connection.clone(), event_handlers.clone(), read, last_response).boxed(); + + // Branch 3: Send outgoing requests to the server. + let (tx, rx) = mpsc::channel(0); + let send_branch = Self::send_loop(address.clone(), event_handlers.clone(), write, rx).boxed(); + + let connection = connection.clone(); + let event_handlers = event_handlers.clone(); + async move { + connection.connect(tx); + // Signal that the connection is up and ready so to start the version querying. + connection_ready_signal.send(()).ok(); + event_handlers.on_connected(&address).ok(); + let via = match connection.settings.protocol { + ElectrumProtocol::TCP => "via TCP", + ElectrumProtocol::SSL if connection.settings.disable_cert_verification => { + "via SSL *with disabled certificate verification*" + }, + ElectrumProtocol::SSL => "via SSL", + ElectrumProtocol::WS => "via WS", + ElectrumProtocol::WSS => "via WSS", + }; + info!("{address} is now connected {via}."); + + let err = select! { + e = timeout_branch.fuse() => e, + e = recv_branch.fuse() => e, + e = send_branch.fuse() => e, + }; + + error!("{address} connection dropped due to: {err:?}"); + event_handlers.on_disconnected(&address).ok(); + connection.disconnect(Some(err)); + } + }; + // Start the connection loop on a weak spawner. + connection.weak_spawner().spawn(connection_loop); + + // Wait for the connection to be ready before querying the version. 
+ let (timeout, connection_ready_res) = + wrap_timeout!(wait_for_connection_ready, timeout, connection, event_handlers); + disconnect_and_return_if_err!(connection_ready_res, Temporary, connection, event_handlers); + + let (_, version_res) = wrap_timeout!( + Self::check_server_version(&connection, &client).boxed(), + timeout, + connection, + event_handlers + ); + disconnect_and_return_if_err!(version_res, connection, event_handlers); + + Ok(()) + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs new file mode 100644 index 0000000000..17f3495b85 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs @@ -0,0 +1,91 @@ +use std::collections::HashSet; +use std::mem; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; + +use super::super::connection::ElectrumConnection; +use super::super::constants::FIRST_SUSPEND_TIME; + +use common::now_ms; +use keys::Address; + +#[derive(Debug)] +struct SuspendTimer { + /// When was the connection last disconnected. + disconnected_at: AtomicU64, + /// How long to suspend the server the next time it disconnects (in milliseconds). + next_suspend_time: AtomicU64, +} + +impl SuspendTimer { + /// Creates a new suspend timer. + fn new() -> Self { + SuspendTimer { + disconnected_at: AtomicU64::new(0), + next_suspend_time: AtomicU64::new(FIRST_SUSPEND_TIME), + } + } + + /// Resets the suspend time and disconnection time. + fn reset(&self) { + self.disconnected_at.store(0, Ordering::SeqCst); + self.next_suspend_time.store(FIRST_SUSPEND_TIME, Ordering::SeqCst); + } + + /// Doubles the suspend time and sets the disconnection time to `now`. + fn double(&self) { + // The max suspend time, 12h. + const MAX_SUSPEND_TIME: u64 = 12 * 60 * 60; + self.disconnected_at.store(now_ms(), Ordering::SeqCst); + let mut next_suspend_time = self.next_suspend_time.load(Ordering::SeqCst); + next_suspend_time = (next_suspend_time * 2).min(MAX_SUSPEND_TIME); + self.next_suspend_time.store(next_suspend_time, Ordering::SeqCst); + } + + /// Returns the time until when the server should be suspended in milliseconds. + fn get_suspend_until(&self) -> u64 { + self.disconnected_at.load(Ordering::SeqCst) + self.next_suspend_time.load(Ordering::SeqCst) * 1000 + } +} + +/// A struct that encapsulates an Electrum connection and its information. +#[derive(Debug)] +pub struct ConnectionContext { + /// The electrum connection. + pub connection: Arc, + /// The list of addresses subscribed to the connection. + subs: Mutex>, + /// The timer deciding when the connection is ready to be used again. + suspend_timer: SuspendTimer, + /// The ID of this connection which also serves as a priority (lower is better). + pub id: u32, +} + +impl ConnectionContext { + /// Creates a new connection context. + pub(super) fn new(connection: ElectrumConnection, id: u32) -> Self { + ConnectionContext { + connection: Arc::new(connection), + subs: Mutex::new(HashSet::new()), + suspend_timer: SuspendTimer::new(), + id, + } + } + + /// Resets the suspend time. + pub(super) fn connected(&self) { self.suspend_timer.reset(); } + + /// Inform the connection context that the connection has been disconnected. + /// + /// Doubles the suspend time and clears the subs list and returns it. + pub(super) fn disconnected(&self) -> HashSet
{ + self.suspend_timer.double(); + mem::take(&mut self.subs.lock().unwrap()) + } + + /// Returns the time the server should be suspended until (when to take it up) in milliseconds. + pub(super) fn suspended_till(&self) -> u64 { self.suspend_timer.get_suspend_until() } + + /// Adds a subscription to the connection context. + pub(super) fn add_sub(&self, address: Address) { self.subs.lock().unwrap().insert(address); } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs new file mode 100644 index 0000000000..b06628fd60 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs @@ -0,0 +1,528 @@ +use std::collections::{BTreeMap, HashMap}; +use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak}; + +use super::super::client::{ElectrumClient, ElectrumClientImpl}; +use super::super::connection::{ElectrumConnection, ElectrumConnectionErr, ElectrumConnectionSettings}; +use super::super::constants::{BACKGROUND_TASK_WAIT_TIMEOUT, PING_INTERVAL}; +use super::connection_context::ConnectionContext; + +use crate::utxo::rpc_clients::UtxoRpcClientOps; +use common::executor::abortable_queue::AbortableQueue; +use common::executor::{AbortableSystem, SpawnFuture, Timer}; +use common::log::{debug, error}; +use common::notifier::{Notifiee, Notifier}; +use common::now_ms; +use keys::Address; + +use futures::compat::Future01CompatExt; +use futures::FutureExt; + +/// A macro to unwrap an option and *execute* some code if the option is None. +macro_rules! unwrap_or_else { + ($option:expr, $($action:tt)*) => {{ + match $option { + Some(some_val) => some_val, + None => { $($action)* } + } + }}; +} + +macro_rules! unwrap_or_continue { + ($option:expr) => { + unwrap_or_else!($option, continue) + }; +} + +macro_rules! unwrap_or_return { + ($option:expr, $ret:expr) => { + unwrap_or_else!($option, return $ret) + }; + ($option:expr) => { + unwrap_or_else!($option, return) + }; +} + +/// The ID of a connection (and also its priority, lower is better). +type ID = u32; + +#[derive(Debug, Display)] +pub enum ConnectionManagerErr { + #[display(fmt = "Unknown server address")] + UnknownAddress, + #[display(fmt = "Failed to connect to the server due to {:?}", _0)] + ConnectingError(ElectrumConnectionErr), + #[display(fmt = "No client found, connection manager isn't initialized properly")] + NoClient, + #[display(fmt = "Connection manager is already initialized")] + AlreadyInitialized, +} + +/// The configuration parameter for a connection manager. +#[derive(Debug)] +pub struct ManagerConfig { + /// A flag to spawn a ping loop task for active connections. + pub spawn_ping: bool, + /// The minimum number of connections that should be connected at all times. + pub min_connected: usize, + /// The maximum number of connections that can be connected at any given time. + pub max_connected: usize, +} + +#[derive(Debug)] +/// A connection manager that maintains a set of connections to electrum servers and +/// handles reconnecting, address subscription distribution, etc... +struct ConnectionManagerImpl { + /// The configuration for the connection manager. + config: ManagerConfig, + /// The set of addresses that are currently connected. + /// + /// This set's size should satisfy: `min_connected <= maintained_connections.len() <= max_connected`. 
+ /// + /// It is actually represented as a sorted map from connection ID (u32, also represents connection priority) + /// to address so we can easily/cheaply pop low priority connections and add high priority ones. + maintained_connections: RwLock>, + /// A map for server addresses to their corresponding connections. + connections: RwLock>, + /// A weak reference to the electrum client that owns this connection manager. + /// It is used to send electrum requests during connection establishment (version querying). + // TODO: This field might not be necessary if [`ElectrumConnection`] object be used to send + // electrum requests on its own, i.e. implement [`JsonRpcClient`] & [`UtxoRpcClientOps`]. + electrum_client: RwLock>>, + /// A notification sender to notify the background task when we have less than `min_connected` connections. + below_min_connected_notifier: Notifier, + /// A notification receiver to be used by the background task to receive notifications of when + /// we have less than `min_connected` maintained connections. + /// + /// Wrapped inside a Mutex>, +} + +#[derive(Clone, Debug)] +pub struct ConnectionManager(Arc); + +// Public interface. +impl ConnectionManager { + pub fn try_new( + servers: Vec, + spawn_ping: bool, + (min_connected, max_connected): (usize, usize), + abortable_system: &AbortableQueue, + ) -> Result { + let mut connections = HashMap::with_capacity(servers.len()); + // Priority is assumed to be the order of the servers in the list as they appear. + for (priority, connection_settings) in servers.into_iter().enumerate() { + let subsystem = abortable_system.create_subsystem().map_err(|e| { + ERRL!( + "Failed to create abortable subsystem for connection: {}, error: {:?}", + connection_settings.url, + e + ) + })?; + let connection = ElectrumConnection::new(connection_settings, subsystem); + connections.insert( + connection.address().to_string(), + ConnectionContext::new(connection, priority as u32), + ); + } + + if min_connected == 0 { + return Err(ERRL!("min_connected should be greater than 0")); + } + if min_connected > max_connected { + return Err(ERRL!( + "min_connected ({}) must be <= max_connected ({})", + min_connected, + max_connected + )); + } + + let (notifier, notifiee) = Notifier::new(); + Ok(ConnectionManager(Arc::new(ConnectionManagerImpl { + config: ManagerConfig { + spawn_ping, + min_connected, + max_connected, + }, + connections: RwLock::new(connections), + maintained_connections: RwLock::new(BTreeMap::new()), + electrum_client: RwLock::new(None), + below_min_connected_notifier: notifier, + below_min_connected_notifiee: Mutex::new(Some(notifiee)), + }))) + } + + /// Initializes the connection manager by connecting the electrum connections. + /// This must be called and only be called once to have a functioning connection manager. + pub fn initialize(&self, weak_client: Weak) -> Result<(), ConnectionManagerErr> { + // Disallow reusing the same manager with another client. + if self.weak_client().read().unwrap().is_some() { + return Err(ConnectionManagerErr::AlreadyInitialized); + } + + let electrum_client = unwrap_or_return!(weak_client.upgrade(), Err(ConnectionManagerErr::NoClient)); + + // Store the (weak) electrum client. + *self.weak_client().write().unwrap() = Some(weak_client); + + // Use the client's spawner to spawn the connection manager's background task. + electrum_client.weak_spawner().spawn(self.clone().background_task()); + + if self.config().spawn_ping { + // Use the client's spawner to spawn the connection manager's ping task. 
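            // `server.ping` is listed in `SEND_TO_ALL_METHODS` (see constants.rs), so each ping reaches every
            // maintained connection, and `PING_INTERVAL` is kept below `CUTOFF_TIMEOUT` so responsive but
            // otherwise idle servers are not dropped by the cutoff timer.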
+ electrum_client.weak_spawner().spawn(self.clone().ping_task()); + } + + Ok(()) + } + + /// Returns all the server addresses. + pub fn get_all_server_addresses(&self) -> Vec { self.read_connections().keys().cloned().collect() } + + /// Returns all the connections. + pub fn get_all_connections(&self) -> Vec> { + self.read_connections() + .values() + .map(|conn_ctx| conn_ctx.connection.clone()) + .collect() + } + + /// Retrieve a specific electrum connection by its address. + /// The connection will be forcibly established if it's disconnected. + pub async fn get_connection_by_address( + &self, + server_address: &str, + force_connect: bool, + ) -> Result, ConnectionManagerErr> { + let connection = self + .get_connection(server_address) + .ok_or(ConnectionManagerErr::UnknownAddress)?; + + if force_connect { + let client = unwrap_or_return!(self.get_client(), Err(ConnectionManagerErr::NoClient)); + // Make sure the connection is connected. + connection + .establish_connection_loop(client) + .await + .map_err(ConnectionManagerErr::ConnectingError)?; + } + + Ok(connection) + } + + /// Returns a list of active/maintained connections. + pub fn get_active_connections(&self) -> Vec> { + self.read_maintained_connections() + .iter() + .filter_map(|(_id, address)| self.get_connection(address)) + .collect() + } + + /// Returns a boolean `true` if the connection pool is empty, `false` otherwise. + pub fn is_connections_pool_empty(&self) -> bool { self.read_connections().is_empty() } + + /// Subscribe the list of addresses to our active connections. + /// + /// There is a bit of indirection here. We register the abandoned addresses on `on_disconnected` with + /// the client to queue them for `utxo_balance_events` which in turn calls this method back to re-subscribe + /// the abandoned addresses. We could have instead directly re-subscribed the addresses here in the connection + /// manager without sending them to `utxo_balance_events`. However, we don't do that so that `utxo_balance_events` + /// knows about all the added addresses. If it doesn't know about them, it won't be able to retrieve the triggered + /// address when its script hash is notified. + pub async fn add_subscriptions(&self, addresses: &HashMap) { + for (scripthash, address) in addresses.iter() { + // For a single address/scripthash, keep trying to subscribe it until we succeed. + 'single_address_sub: loop { + let client = unwrap_or_return!(self.get_client()); + let connections = self.get_active_connections(); + if connections.is_empty() { + // If there are no active connections, wait for a connection to be established. + Timer::sleep(1.).await; + continue; + } + // Try to subscribe the address to any connection we have. + for connection in connections { + if client + .blockchain_scripthash_subscribe_using(connection.address(), scripthash.clone()) + .compat() + .await + .is_ok() + { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_continue!(all_connections.get(connection.address())); + connection_ctx.add_sub(address.clone()); + break 'single_address_sub; + } + } + } + } + } + + /// Handles the connection event. + pub fn on_connected(&self, server_address: &str) { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + + // Reset the suspend time & disconnection time. + connection_ctx.connected(); + } + + /// Handles the disconnection event from an Electrum server. 
+ pub fn on_disconnected(&self, server_address: &str) { + debug!("Electrum server disconnected: {}", server_address); + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + + self.unmaintain(connection_ctx.id); + + let abandoned_subs = connection_ctx.disconnected(); + // Re-subscribe the abandoned addresses using the client. + let client = unwrap_or_return!(self.get_client()); + client.subscribe_addresses(abandoned_subs).ok(); + } + + /// A method that should be called after using a specific server for some request. + /// + /// Instead of disconnecting the connection right away, this method will only disconnect it + /// if it's not in the maintained connections set. + pub fn not_needed(&self, server_address: &str) { + let (id, connection) = { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + (connection_ctx.id, connection_ctx.connection.clone()) + }; + if !self.read_maintained_connections().contains_key(&id) { + connection.disconnect(Some(ElectrumConnectionErr::Temporary("Not needed anymore".to_string()))); + self.on_disconnected(connection.address()); + } + } + + /// Remove a connection from the connection manager by its address. + // TODO(feat): Add the ability to add a connection during runtime. + pub fn remove_connection(&self, server_address: &str) -> Result, ConnectionManagerErr> { + let connection = self + .get_connection(server_address) + .ok_or(ConnectionManagerErr::UnknownAddress)?; + // Make sure this connection is disconnected. + connection.disconnect(Some(ElectrumConnectionErr::Irrecoverable( + "Forcefully disconnected & removed".to_string(), + ))); + // Run the on-disconnection hook, this will also make sure the connection is removed from the maintained set. + self.on_disconnected(connection.address()); + // Remove the connection from the manager. + self.write_connections().remove(server_address); + Ok(connection) + } +} + +// Background tasks. +impl ConnectionManager { + /// A forever-lived task that pings active/maintained connections periodically. + async fn ping_task(self) { + loop { + let client = unwrap_or_return!(self.get_client()); + // This will ping all the active/maintained connections, which will keep these connections alive. + client.server_ping().compat().await.ok(); + Timer::sleep(PING_INTERVAL).await; + } + } + + /// A forever-lived task that does the house keeping tasks of the connection manager: + /// - Maintaining the right number of active connections. + /// - Establishing new connections if needed. + /// - Replacing low priority connections with high priority ones periodically. + /// - etc... + async fn background_task(self) { + // Take out the min_connected notifiee from the manager. + let mut min_connected_notification = unwrap_or_return!(self.extract_below_min_connected_notifiee()); + // A flag to indicate whether to log connection establishment errors or not. We should not log them if we + // are in panic mode (i.e. we are below the `min_connected` threshold) as this will flood the error log. + let mut log_errors = true; + loop { + // Get the candidate connections that we will consider maintaining. + let (candidate_connections, will_never_get_min_connected) = self.get_candidate_connections(); + // Establish the connections to the selected candidates and alter the maintained connections set accordingly. 
+            self.establish_best_connections(candidate_connections, log_errors).await;
+            // Only sleep if we successfully acquired the minimum number of connections,
+            // or if we know we can never maintain `min_connected` connections; there is no point in looping without waiting in that case.
+            if self.read_maintained_connections().len() >= self.config().min_connected || will_never_get_min_connected {
+                // Wait for a timeout or a below-`min_connected` notification before doing another round of housekeeping.
+                futures::select! {
+                    _ = Timer::sleep(BACKGROUND_TASK_WAIT_TIMEOUT).fuse() => (),
+                    _ = min_connected_notification.wait().fuse() => (),
+                }
+                log_errors = true;
+            } else {
+                // Never sleeping can result in busy waiting, which is problematic as it might not
+                // give a chance to other tasks to make progress, especially in single-threaded environments.
+                // Yield the execution to the executor to give a chance to other tasks to run.
+                // TODO: `yield` keyword is not supported in the current rust version, using a short sleep for now.
+                Timer::sleep(1.).await;
+                log_errors = false;
+            }
+        }
+    }
+
+    /// Returns a list of candidate connections that aren't maintained and could be considered for maintaining.
+    ///
+    /// Also returns a flag indicating whether covering `min_connected` connections is even possible: not possible when
+    /// `min_connected` is greater than the number of connections we have.
+    fn get_candidate_connections(&self) -> (Vec<(Arc<ElectrumConnection>, u32)>, bool) {
+        let all_connections = self.read_connections();
+        let maintained_connections = self.read_maintained_connections();
+        // The number of connections we need to add as maintained to reach the `min_connected` threshold.
+        let connections_needed = self.config().min_connected.saturating_sub(maintained_connections.len());
+        // The connections we can consider as candidates (all connections except the maintained ones).
+        let all_candidate_connections: Vec<_> = all_connections
+            .iter()
+            .filter_map(|(_, conn_ctx)| {
+                (!maintained_connections.contains_key(&conn_ctx.id)).then(|| (conn_ctx.connection.clone(), conn_ctx.id))
+            })
+            .collect();
+        // The candidate connections from above, but further filtered by whether they are suspended or not.
+        let non_suspended_candidate_connections: Vec<_> = all_candidate_connections
+            .iter()
+            .filter(|(connection, _)| {
+                all_connections
+                    .get(connection.address())
+                    .map_or(false, |conn_ctx| now_ms() > conn_ctx.suspended_till())
+            })
+            .cloned()
+            .collect();
+        // Decide which candidate connections to consider (all, or only the non-suspended ones).
+        if connections_needed > non_suspended_candidate_connections.len() {
+            if connections_needed > all_candidate_connections.len() {
+                // Not enough connections to cover the `min_connected` threshold.
+                // This means we will never be able to maintain `min_connected` active connections.
+                (all_candidate_connections, true)
+            } else {
+                // If we consider all candidate connections (even though some are suspended), we can cover the needed connections.
+                // We consider the suspended ones as well because otherwise we would stay below the `min_connected` threshold.
+                (all_candidate_connections, false)
+            }
+        } else {
+            // Non-suspended candidates are enough to cover the needed connections.
+            (non_suspended_candidate_connections, false)
+        }
+    }
+
+    /// Establishes the best connections (based on priority) using the candidate connections
+    /// until we can't establish any more (i.e., we hit the `max_connected` threshold).
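A minimal sketch (not taken from this diff) of the admission rule described above, using the hypothetical names `maintained`, `max_connected` and `candidate_id`: a candidate is attempted only when there is spare capacity, or when it outranks the worst currently maintained connection (lower ID = higher priority), mirroring the check performed inside `establish_best_connections` below.

    use std::collections::BTreeMap;

    /// `maintained` maps connection ID (priority) -> server address.
    fn should_try_candidate(maintained: &BTreeMap<u32, String>, max_connected: usize, candidate_id: u32) -> bool {
        // The worst (highest-ID) maintained connection, or u32::MAX if nothing is maintained yet.
        let worst_id = *maintained.keys().next_back().unwrap_or(&u32::MAX);
        maintained.len() < max_connected || candidate_id < worst_id
    }

`maintain` further below completes the rule by evicting the highest-ID entry whenever the map grows past `max_connected`, so the maintained set converges to the highest-priority reachable servers.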
+ async fn establish_best_connections( + &self, + mut candidate_connections: Vec<(Arc, u32)>, + log_errors: bool, + ) { + let client = unwrap_or_return!(self.get_client()); + // Sort the candidate connections by their priority/ID. + candidate_connections.sort_by_key(|(_, priority)| *priority); + for (connection, connection_id) in candidate_connections { + let address = connection.address().to_string(); + let (maintained_connections_size, lowest_priority_connection_id) = { + let maintained_connections = self.read_maintained_connections(); + let maintained_connections_size = maintained_connections.len(); + let lowest_priority_connection_id = *maintained_connections.keys().next_back().unwrap_or(&u32::MAX); + (maintained_connections_size, lowest_priority_connection_id) + }; + + // We can only try to add the connection if: + // 1- We haven't reached the `max_connected` threshold. + // 2- We have reached the `max_connected` threshold but the connection has a higher priority than the lowest priority connection. + if maintained_connections_size < self.config().max_connected + || connection_id < lowest_priority_connection_id + { + // Now that we know the connection is good to be inserted, try to establish it. + if let Err(e) = connection.establish_connection_loop(client.clone()).await { + if log_errors { + error!("Failed to establish connection to {address} due to error: {e:?}"); + } + // Remove the connection if it's not recoverable. + if !e.is_recoverable() { + self.remove_connection(&address).ok(); + } + continue; + } + self.maintain(connection_id, address); + } else { + // If any of the two conditions on the `if` statement above are not met, there is nothing to do. + // At this point we have already collected `max_connected` connections and also the current connection + // in the candidate list has a lower priority than the lowest priority maintained connection, and the next + // candidate connections as well since they are sorted by priority. + break; + } + } + } +} + +// Abstractions over the accesses of the inner fields of the connection manager. +impl ConnectionManager { + #[inline] + pub fn config(&self) -> &ManagerConfig { &self.0.config } + + #[inline] + fn read_connections(&self) -> RwLockReadGuard> { + self.0.connections.read().unwrap() + } + + #[inline] + fn write_connections(&self) -> RwLockWriteGuard> { + self.0.connections.write().unwrap() + } + + #[inline] + fn get_connection(&self, server_address: &str) -> Option> { + self.read_connections() + .get(server_address) + .map(|connection_ctx| connection_ctx.connection.clone()) + } + + #[inline] + fn read_maintained_connections(&self) -> RwLockReadGuard> { + self.0.maintained_connections.read().unwrap() + } + + #[inline] + fn maintain(&self, id: ID, server_address: String) { + let mut maintained_connections = self.0.maintained_connections.write().unwrap(); + maintained_connections.insert(id, server_address); + // If we have reached the `max_connected` threshold then remove the lowest priority connection. + if maintained_connections.len() > self.config().max_connected { + let lowest_priority_connection_id = *maintained_connections.keys().next_back().unwrap_or(&u32::MAX); + maintained_connections.remove(&lowest_priority_connection_id); + } + } + + #[inline] + fn unmaintain(&self, id: ID) { + // To avoid write locking the maintained connections, just make sure the connection is actually maintained first. 
+ let is_maintained = self.read_maintained_connections().contains_key(&id); + if is_maintained { + // If the connection was maintained, remove it from the maintained connections. + let mut maintained_connections = self.0.maintained_connections.write().unwrap(); + maintained_connections.remove(&id); + // And notify the background task if we fell below the `min_connected` threshold. + if maintained_connections.len() < self.config().min_connected { + self.notify_below_min_connected() + } + } + } + + #[inline] + fn notify_below_min_connected(&self) { self.0.below_min_connected_notifier.notify().ok(); } + + #[inline] + fn extract_below_min_connected_notifiee(&self) -> Option { + self.0.below_min_connected_notifiee.lock().unwrap().take() + } + + #[inline] + fn weak_client(&self) -> &RwLock>> { &self.0.electrum_client } + + #[inline] + fn get_client(&self) -> Option { + self.weak_client() + .read() + .unwrap() + .as_ref() // None here = client was never initialized. + .and_then(|weak| weak.upgrade().map(ElectrumClient)) // None here = client was dropped. + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs new file mode 100644 index 0000000000..b301ad3186 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs @@ -0,0 +1,4 @@ +mod connection_context; +mod manager; + +pub use manager::ConnectionManager; diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs new file mode 100644 index 0000000000..f4d1a0efa5 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs @@ -0,0 +1,32 @@ +/// The timeout for the electrum server to respond to a request. +pub const ELECTRUM_REQUEST_TIMEOUT: f64 = 20.; +/// The default (can be overridden) maximum timeout to establish a connection with the electrum server. +/// This included connecting to the server and querying the server version. +pub const DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT: f64 = 20.; +/// Wait this long before pinging again. +pub const PING_INTERVAL: f64 = 30.; +/// Used to cutoff the server connection after not receiving any response for that long. +/// This only makes sense if we have sent a request to the server. So we need to keep `PING_INTERVAL` +/// lower than this value, otherwise we might disconnect servers that are perfectly responsive but just +/// haven't received any requests from us for a while. +pub const CUTOFF_TIMEOUT: f64 = 60.; +/// Initial server suspension time. +pub const FIRST_SUSPEND_TIME: u64 = 10; +/// The timeout used by the background task of the connection manager to re-check the manager's health. +pub const BACKGROUND_TASK_WAIT_TIMEOUT: f64 = (5 * 60) as f64; +/// Electrum methods that should not be sent without forcing the connection to be established first. +pub const NO_FORCE_CONNECT_METHODS: &[&str] = &[ + // The server should already be connected if we are querying for its version, don't force connect. + "server.version", +]; +/// Electrum methods that should be sent to all connections even after receiving a response from a subset of them. +/// Note that this is only applicable to active/maintained connections. If an electrum request fails by all maintained +/// connections, a fallback using all connections will *NOT* be attempted. +pub const SEND_TO_ALL_METHODS: &[&str] = &[ + // A ping should be sent to all connections even if we got a response from one of them early. 
+ "server.ping", +]; +/// Electrum RPC method for headers subscription. +pub const BLOCKCHAIN_HEADERS_SUB_ID: &str = "blockchain.headers.subscribe"; +/// Electrum RPC method for script/address subscription. +pub const BLOCKCHAIN_SCRIPTHASH_SUB_ID: &str = "blockchain.scripthash.subscribe"; diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs new file mode 100644 index 0000000000..27bd74b4d9 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs @@ -0,0 +1,74 @@ +use super::connection_manager::ConnectionManager; +use super::constants::BLOCKCHAIN_SCRIPTHASH_SUB_ID; + +use crate::utxo::ScripthashNotification; +use crate::RpcTransportEventHandler; +use common::jsonrpc_client::JsonRpcRequest; +use common::log::{error, warn}; + +use futures::channel::mpsc::UnboundedSender; +use serde_json::{self as json, Value as Json}; + +/// An `RpcTransportEventHandler` that forwards `ScripthashNotification`s to trigger balance updates. +/// +/// This handler hooks in `on_incoming_response` and looks for an electrum script hash notification to forward it. +pub struct ElectrumScriptHashNotificationBridge { + pub scripthash_notification_sender: UnboundedSender, +} + +impl RpcTransportEventHandler for ElectrumScriptHashNotificationBridge { + fn debug_info(&self) -> String { "ElectrumScriptHashNotificationBridge".into() } + + fn on_incoming_response(&self, data: &[u8]) { + if let Ok(raw_json) = json::from_slice::(data) { + // Try to parse the notification. A notification is sent as a JSON-RPC request. + if let Ok(notification) = json::from_value::(raw_json) { + // Only care about `BLOCKCHAIN_SCRIPTHASH_SUB_ID` notifications. + if notification.method.as_str() == BLOCKCHAIN_SCRIPTHASH_SUB_ID { + if let Some(scripthash) = notification.params.first().and_then(|s| s.as_str()) { + if let Err(e) = self + .scripthash_notification_sender + .unbounded_send(ScripthashNotification::Triggered(scripthash.to_string())) + { + error!("Failed sending script hash message. {e:?}"); + } + } else { + warn!("Notification must contain the script hash value, got: {notification:?}"); + } + }; + } + } + } + + fn on_connected(&self, _address: &str) -> Result<(), String> { Ok(()) } + + fn on_disconnected(&self, _address: &str) -> Result<(), String> { Ok(()) } + + fn on_outgoing_request(&self, _data: &[u8]) {} +} + +/// An `RpcTransportEventHandler` that notifies the `ConnectionManager` upon connections and disconnections. +/// +/// When a connection is connected or disconnected, this event handler will notify the `ConnectionManager` +/// to handle the the event. 
+pub struct ElectrumConnectionManagerNotifier { + pub connection_manager: ConnectionManager, +} + +impl RpcTransportEventHandler for ElectrumConnectionManagerNotifier { + fn debug_info(&self) -> String { "ElectrumConnectionManagerNotifier".into() } + + fn on_connected(&self, address: &str) -> Result<(), String> { + self.connection_manager.on_connected(address); + Ok(()) + } + + fn on_disconnected(&self, address: &str) -> Result<(), String> { + self.connection_manager.on_disconnected(address); + Ok(()) + } + + fn on_incoming_response(&self, _data: &[u8]) {} + + fn on_outgoing_request(&self, _data: &[u8]) {} +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs new file mode 100644 index 0000000000..bf78308be2 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs @@ -0,0 +1,20 @@ +use sha2::{Digest, Sha256}; + +mod client; +mod connection; +mod connection_manager; +mod constants; +mod event_handlers; +mod rpc_responses; +#[cfg(not(target_arch = "wasm32"))] mod tcp_stream; + +pub use client::{ElectrumClient, ElectrumClientImpl, ElectrumClientSettings}; +pub use connection::ElectrumConnectionSettings; +pub use rpc_responses::*; + +#[inline] +pub fn electrum_script_hash(script: &[u8]) -> Vec { + let mut sha = Sha256::new(); + sha.update(script); + sha.finalize().iter().rev().copied().collect() +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs new file mode 100644 index 0000000000..75daac6f35 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs @@ -0,0 +1,168 @@ +use chain::{BlockHeader, BlockHeaderBits, BlockHeaderNonce, Transaction as UtxoTx}; +use mm2_number::{BigDecimal, BigInt}; +use rpc::v1::types::{Bytes as BytesJson, H256 as H256Json}; +use serialization::serialize; + +#[derive(Debug, Deserialize)] +pub struct ElectrumTxHistoryItem { + pub height: i64, + pub tx_hash: H256Json, + pub fee: Option, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumUnspent { + pub height: Option, + pub tx_hash: H256Json, + pub tx_pos: u32, + pub value: u64, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(untagged)] +pub enum ElectrumNonce { + Number(u64), + Hash(H256Json), +} + +#[allow(clippy::from_over_into)] +impl Into for ElectrumNonce { + fn into(self) -> BlockHeaderNonce { + match self { + ElectrumNonce::Number(n) => BlockHeaderNonce::U32(n as u32), + ElectrumNonce::Hash(h) => BlockHeaderNonce::H256(h.into()), + } + } +} + +#[derive(Debug, Deserialize)] +pub struct ElectrumBlockHeadersRes { + pub count: u64, + pub hex: BytesJson, + #[allow(dead_code)] + max: u64, +} + +/// The block header compatible with Electrum 1.2 +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBlockHeaderV12 { + pub bits: u64, + pub block_height: u64, + pub merkle_root: H256Json, + pub nonce: ElectrumNonce, + pub prev_block_hash: H256Json, + pub timestamp: u64, + pub version: u64, +} + +impl ElectrumBlockHeaderV12 { + fn as_block_header(&self) -> BlockHeader { + BlockHeader { + version: self.version as u32, + previous_header_hash: self.prev_block_hash.into(), + merkle_root_hash: self.merkle_root.into(), + claim_trie_root: None, + hash_final_sapling_root: None, + time: self.timestamp as u32, + bits: BlockHeaderBits::U32(self.bits as u32), + nonce: self.nonce.clone().into(), + solution: None, + aux_pow: None, + prog_pow: None, + mtp_pow: None, + is_verus: false, + hash_state_root: None, + hash_utxo_root: None, + 
prevout_stake: None, + vch_block_sig_dlgt: None, + n_height: None, + n_nonce_u64: None, + mix_hash: None, + } + } + + #[inline] + pub fn as_hex(&self) -> String { + let block_header = self.as_block_header(); + let serialized = serialize(&block_header); + hex::encode(serialized) + } + + #[inline] + pub fn hash(&self) -> H256Json { + let block_header = self.as_block_header(); + BlockHeader::hash(&block_header).into() + } +} + +/// The block header compatible with Electrum 1.4 +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBlockHeaderV14 { + pub height: u64, + pub hex: BytesJson, +} + +impl ElectrumBlockHeaderV14 { + pub fn hash(&self) -> H256Json { self.hex.clone().into_vec()[..].into() } +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(untagged)] +pub enum ElectrumBlockHeader { + V12(ElectrumBlockHeaderV12), + V14(ElectrumBlockHeaderV14), +} + +impl ElectrumBlockHeader { + pub fn block_height(&self) -> u64 { + match self { + ElectrumBlockHeader::V12(h) => h.block_height, + ElectrumBlockHeader::V14(h) => h.height, + } + } + + pub fn block_hash(&self) -> H256Json { + match self { + ElectrumBlockHeader::V12(h) => h.hash(), + ElectrumBlockHeader::V14(h) => h.hash(), + } + } +} + +/// The merkle branch of a confirmed transaction +#[derive(Clone, Debug, Deserialize)] +pub struct TxMerkleBranch { + pub merkle: Vec, + pub block_height: u64, + pub pos: usize, +} + +#[derive(Clone)] +pub struct ConfirmedTransactionInfo { + pub tx: UtxoTx, + pub header: BlockHeader, + pub index: u64, + pub height: u64, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBalance { + pub(crate) confirmed: i128, + pub(crate) unconfirmed: i128, +} + +impl ElectrumBalance { + #[inline] + pub fn to_big_decimal(&self, decimals: u8) -> BigDecimal { + let balance_sat = BigInt::from(self.confirmed) + BigInt::from(self.unconfirmed); + BigDecimal::from(balance_sat) / BigDecimal::from(10u64.pow(decimals as u32)) + } +} + +#[derive(Debug, Deserialize, Serialize)] +/// Deserializable Electrum protocol version representation for RPC +/// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#server.version +pub struct ElectrumProtocolVersion { + pub server_software_version: String, + pub protocol_version: String, +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs new file mode 100644 index 0000000000..b50b7f5c85 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs @@ -0,0 +1,105 @@ +use std::io; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::SystemTime; + +use futures::io::Error; +use rustls::client::ServerCertVerified; +use rustls::{Certificate, ClientConfig, OwnedTrustAnchor, RootCertStore, ServerName}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::net::TcpStream; +use tokio_rustls::client::TlsStream; +use webpki_roots::TLS_SERVER_ROOTS; + +/// The enum wrapping possible variants of underlying Streams +#[allow(clippy::large_enum_variant)] +pub enum ElectrumStream { + Tcp(TcpStream), + Tls(TlsStream), +} + +impl AsRef for ElectrumStream { + fn as_ref(&self) -> &TcpStream { + match self { + ElectrumStream::Tcp(stream) => stream, + ElectrumStream::Tls(stream) => stream.get_ref().0, + } + } +} + +impl AsyncRead for ElectrumStream { + fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), + 
ElectrumStream::Tls(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), + } + } +} + +impl AsyncWrite for ElectrumStream { + fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), + ElectrumStream::Tls(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), + } + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), + ElectrumStream::Tls(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), + } + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), + ElectrumStream::Tls(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), + } + } +} + +/// Skips the server certificate verification on TLS connection +pub struct NoCertificateVerification {} + +impl rustls::client::ServerCertVerifier for NoCertificateVerification { + fn verify_server_cert( + &self, + _: &Certificate, + _: &[Certificate], + _: &ServerName, + _: &mut dyn Iterator, + _: &[u8], + _: SystemTime, + ) -> Result { + Ok(rustls::client::ServerCertVerified::assertion()) + } +} + +fn rustls_client_config(unsafe_conf: bool) -> Arc { + let mut cert_store = RootCertStore::empty(); + + cert_store.add_trust_anchors( + TLS_SERVER_ROOTS + .iter() + .map(|ta| OwnedTrustAnchor::from_subject_spki_name_constraints(ta.subject, ta.spki, ta.name_constraints)), + ); + + let mut tls_config = rustls::ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(cert_store) + .with_no_client_auth(); + + if unsafe_conf { + tls_config + .dangerous() + .set_certificate_verifier(Arc::new(NoCertificateVerification {})); + } + Arc::new(tls_config) +} + +lazy_static! { + pub static ref SAFE_TLS_CONFIG: Arc = rustls_client_config(false); + pub static ref UNSAFE_TLS_CONFIG: Arc = rustls_client_config(true); +} diff --git a/mm2src/coins/utxo/utxo_balance_events.rs b/mm2src/coins/utxo/utxo_balance_events.rs index 2d97ef5cc9..ec1de7aa40 100644 --- a/mm2src/coins/utxo/utxo_balance_events.rs +++ b/mm2src/coins/utxo/utxo_balance_events.rs @@ -1,20 +1,20 @@ +use super::utxo_standard::UtxoStandardCoin; +use crate::utxo::rpc_clients::UtxoRpcClientEnum; +use crate::{utxo::{output_script, + rpc_clients::electrum_script_hash, + utxo_common::{address_balance, address_to_scripthash}, + ScripthashNotification, UtxoCoinFields}, + CoinWithDerivationMethod, MarketCoinOps, MmCoin}; use async_trait::async_trait; -use common::{executor::{AbortSettings, SpawnAbortable, Timer}, - log, Future01CompatExt}; +use common::{executor::{AbortSettings, SpawnAbortable}, + log}; use futures::channel::oneshot::{self, Receiver, Sender}; use futures_util::StreamExt; use keys::Address; use mm2_core::mm_ctx::MmArc; use mm2_event_stream::{behaviour::{EventBehaviour, EventInitStatus}, ErrorEventName, Event, EventName, EventStreamConfiguration}; -use std::collections::{BTreeMap, HashSet}; - -use super::utxo_standard::UtxoStandardCoin; -use crate::{utxo::{output_script, - rpc_clients::electrum_script_hash, - utxo_common::{address_balance, address_to_scripthash}, - ScripthashNotification, UtxoCoinFields}, - CoinWithDerivationMethod, MarketCoinOps, MmCoin}; +use std::collections::{BTreeMap, HashMap, HashSet}; macro_rules! 
try_or_continue { ($exp:expr) => { @@ -41,37 +41,29 @@ impl EventBehaviour for UtxoStandardCoin { utxo: &UtxoCoinFields, addresses: HashSet
, ) -> Result, String> { - const LOOP_INTERVAL: f64 = 0.5; - - let mut scripthash_to_address_map: BTreeMap = BTreeMap::new(); - for address in addresses { - let scripthash = address_to_scripthash(&address).map_err(|e| e.to_string())?; - - scripthash_to_address_map.insert(scripthash.clone(), address); - - let mut attempt = 0; - while let Err(e) = utxo - .rpc_client - .blockchain_scripthash_subscribe(scripthash.clone()) - .compat() - .await - { - if attempt == 5 { - return Err(e.to_string()); - } - - log::error!( - "Failed to subscribe {} scripthash ({attempt}/5 attempt). Error: {}", - scripthash, - e.to_string() - ); - - attempt += 1; - Timer::sleep(LOOP_INTERVAL).await; - } + match utxo.rpc_client.clone() { + UtxoRpcClientEnum::Electrum(client) => { + // Collect the scrpithash for every address into a map. + let scripthash_to_address_map = addresses + .into_iter() + .map(|address| { + let scripthash = address_to_scripthash(&address).map_err(|e| e.to_string())?; + Ok((scripthash, address)) + }) + .collect::, String>>()?; + // Add these subscriptions to the connection manager. It will choose whatever connections + // it sees fit to subscribe each of these addresses to. + client + .connection_manager + .add_subscriptions(&scripthash_to_address_map) + .await; + // Convert the hashmap back to btreemap. + Ok(scripthash_to_address_map.into_iter().map(|(k, v)| (k, v)).collect()) + }, + UtxoRpcClientEnum::Native(_) => { + Err("Balance streaming is currently not supported for native client.".to_owned()) + }, } - - Ok(scripthash_to_address_map) } let ctx = match MmArc::from_weak(&self.as_ref().ctx) { @@ -115,24 +107,6 @@ impl EventBehaviour for UtxoStandardCoin { }, }; - continue; - }, - ScripthashNotification::RefreshSubscriptions => { - let my_addresses = try_or_continue!(self.all_addresses().await); - match subscribe_to_addresses(self.as_ref(), my_addresses).await { - Ok(map) => scripthash_to_address_map = map, - Err(e) => { - log::error!("{e}"); - - ctx.stream_channel_controller - .broadcast(Event::new( - format!("{}:{}", Self::error_event_name(), self.ticker()), - json!({ "error": e }).to_string(), - )) - .await; - }, - }; - continue; }, }; diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index 60b4d75ff0..f8e16a6089 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -260,14 +260,14 @@ pub(crate) async fn block_header_utxo_loop( ) { macro_rules! 
remove_server_and_break_if_no_servers_left { ($client:expr, $server_address:expr, $ticker:expr, $sync_status_loop_handle:expr) => { - if let Err(e) = $client.remove_server($server_address).await { + if let Err(e) = $client.remove_server($server_address) { let msg = format!("Error {} on removing server {}!", e, $server_address); // Todo: Permanent error notification should lead to deactivation of coin after applying some fail-safe measures if there are on-going swaps $sync_status_loop_handle.notify_on_permanent_error(msg); break; } - if $client.is_connections_pool_empty().await { + if $client.is_connections_pool_empty() { // Todo: Permanent error notification should lead to deactivation of coin after applying some fail-safe measures if there are on-going swaps let msg = format!("All servers are removed for {}!", $ticker); $sync_status_loop_handle.notify_on_permanent_error(msg); @@ -294,14 +294,14 @@ pub(crate) async fn block_header_utxo_loop( }; let mut args = BlockHeaderUtxoLoopExtraArgs::default(); while let Some(client) = weak.upgrade() { - let client = &ElectrumClient(client); + let client = ElectrumClient(client); let ticker = client.coin_name(); let storage = client.block_headers_storage(); let last_height_in_storage = match storage.get_last_block_height().await { Ok(Some(height)) => height, Ok(None) => { - if let Err(err) = validate_and_store_starting_header(client, ticker, storage, &spv_conf).await { + if let Err(err) = validate_and_store_starting_header(&client, ticker, storage, &spv_conf).await { sync_status_loop_handle.notify_on_permanent_error(err); break; } @@ -372,7 +372,7 @@ pub(crate) async fn block_header_utxo_loop( }; let (block_registry, block_headers) = match try_to_retrieve_headers_until_success( &mut args, - client, + &client, server_address, last_height_in_storage + 1, retrieve_to, @@ -411,7 +411,7 @@ pub(crate) async fn block_header_utxo_loop( } = &err { match resolve_possible_chain_reorg( - client, + &client, server_address, &mut args, last_height_in_storage, diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 446cadf2bb..15a699c2f1 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -1,41 +1,37 @@ use crate::hd_wallet::{load_hd_accounts_from_storage, HDAccountsMutex, HDWallet, HDWalletCoinStorage, HDWalletStorageError, DEFAULT_GAP_LIMIT}; -use crate::utxo::rpc_clients::{ElectrumClient, ElectrumClientImpl, ElectrumRpcRequest, EstimateFeeMethod, +use crate::utxo::rpc_clients::{ElectrumClient, ElectrumClientSettings, ElectrumConnectionSettings, EstimateFeeMethod, UtxoRpcClientEnum}; use crate::utxo::tx_cache::{UtxoVerboseCacheOps, UtxoVerboseCacheShared}; use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; use crate::utxo::utxo_builder::utxo_conf_builder::{UtxoConfBuilder, UtxoConfError}; -use crate::utxo::{output_script, ElectrumBuilderArgs, ElectrumProtoVerifier, ElectrumProtoVerifierEvent, - RecentlySpentOutPoints, ScripthashNotification, ScripthashNotificationSender, TxFee, UtxoCoinConf, - UtxoCoinFields, UtxoHDWallet, UtxoRpcMode, UtxoSyncStatus, UtxoSyncStatusLoopHandle, - UTXO_DUST_AMOUNT}; +use crate::utxo::{output_script, ElectrumBuilderArgs, RecentlySpentOutPoints, ScripthashNotification, + ScripthashNotificationSender, TxFee, UtxoCoinConf, UtxoCoinFields, UtxoHDWallet, UtxoRpcMode, + UtxoSyncStatus, UtxoSyncStatusLoopHandle, UTXO_DUST_AMOUNT}; use crate::{BlockchainNetwork, CoinTransportMetrics, 
DerivationMethod, HistorySyncState, IguanaPrivKey, - PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, UtxoActivationParams}; + PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, + SharableRpcTransportEventHandler, UtxoActivationParams}; use async_trait::async_trait; use chain::TxHashAlgo; -use common::custom_futures::repeatable::{Ready, Retry}; -use common::executor::{abortable_queue::AbortableQueue, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, - Timer}; -use common::log::{error, info, LogOnError}; -use common::{now_sec, small_rng}; +use common::executor::{abortable_queue::AbortableQueue, AbortableSystem, AbortedError}; +use common::now_sec; use crypto::{Bip32DerPathError, CryptoCtx, CryptoCtxError, GlobalHDAccountArc, HwWalletType, StandardHDPathError}; use derive_more::Display; -use futures::channel::mpsc::{channel, unbounded, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; +use futures::channel::mpsc::{channel, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; use futures::compat::Future01CompatExt; use futures::lock::Mutex as AsyncMutex; -use futures::StreamExt; use keys::bytes::Bytes; pub use keys::{Address, AddressBuilder, AddressFormat as UtxoAddressFormat, AddressHashEnum, AddressScriptType, KeyPair, Private, Public, Secret}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; use primitives::hash::H160; -use rand::seq::SliceRandom; use serde_json::{self as json, Value as Json}; use spv_validation::conf::SPVConf; use spv_validation::helpers_validation::SPVError; use spv_validation::storage::{BlockHeaderStorageError, BlockHeaderStorageOps}; -use std::sync::{Arc, Mutex, Weak}; +use std::sync::Arc; +use std::sync::Mutex; cfg_native! { use crate::utxo::coin_daemon_data_dir; @@ -60,16 +56,6 @@ pub enum UtxoCoinBuildError { ErrorDetectingFeeMethod(String), ErrorDetectingDecimals(String), InvalidBlockchainNetwork(String), - #[display( - fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", - electrum_servers, - seconds - )] - FailedToConnectToElectrums { - electrum_servers: Vec, - seconds: u64, - }, - ElectrumProtocolVersionCheckError(String), #[display(fmt = "Can not detect the user home directory")] CantDetectUserHome, #[display(fmt = "Private key policy is not allowed: {}", _0)] @@ -82,8 +68,6 @@ pub enum UtxoCoinBuildError { )] CoinDoesntSupportTrezor, BlockHeaderStorageError(BlockHeaderStorageError), - #[display(fmt = "Error {} on getting the height of the latest block from rpc!", _0)] - CantGetBlockCount(String), #[display(fmt = "Internal error: {}", _0)] Internal(String), #[display(fmt = "SPV params verificaiton failed. 
Error: {_0}")] @@ -252,10 +236,7 @@ fn get_scripthash_notification_handlers( Arc>>, )> { if ctx.event_stream_configuration.is_some() { - let (sender, receiver): ( - UnboundedSender, - UnboundedReceiver, - ) = futures::channel::mpsc::unbounded(); + let (sender, receiver) = futures::channel::mpsc::unbounded(); Some((sender, Arc::new(AsyncMutex::new(receiver)))) } else { None @@ -565,12 +546,17 @@ pub trait UtxoCoinBuilderCommonOps { Ok(UtxoRpcClientEnum::Native(native)) } }, - UtxoRpcMode::Electrum { servers } => { + UtxoRpcMode::Electrum { + servers, + min_connected, + max_connected, + } => { let electrum = self .electrum_client( abortable_system, ElectrumBuilderArgs::default(), servers, + (min_connected, max_connected), scripthash_notification_sender, ) .await?; @@ -585,21 +571,19 @@ pub trait UtxoCoinBuilderCommonOps { &self, abortable_system: AbortableQueue, args: ElectrumBuilderArgs, - mut servers: Vec, + servers: Vec, + (min_connected, max_connected): (Option, Option), scripthash_notification_sender: ScripthashNotificationSender, ) -> UtxoCoinBuildResult { - let (on_event_tx, on_event_rx) = unbounded(); - let ticker = self.ticker().to_owned(); + let coin_ticker = self.ticker().to_owned(); let ctx = self.ctx(); - let mut event_handlers = vec![]; + let mut event_handlers: Vec> = vec![]; if args.collect_metrics { - event_handlers.push( - CoinTransportMetrics::new(ctx.metrics.weak(), ticker.clone(), RpcClientType::Electrum).into_shared(), - ); - } - - if args.negotiate_version { - event_handlers.push(ElectrumProtoVerifier { on_event_tx }.into_shared()); + event_handlers.push(Box::new(CoinTransportMetrics::new( + ctx.metrics.weak(), + coin_ticker.clone(), + RpcClientType::Electrum, + ))); } let storage_ticker = self.ticker().replace('-', "_"); @@ -609,56 +593,27 @@ pub trait UtxoCoinBuilderCommonOps { block_headers_storage.init().await?; } - let mut rng = small_rng(); - servers.as_mut_slice().shuffle(&mut rng); + let gui = ctx.gui().unwrap_or("UNKNOWN").to_string(); + let mm_version = ctx.mm_version().to_string(); + let (min_connected, max_connected) = (min_connected.unwrap_or(1), max_connected.unwrap_or(servers.len())); + let client_settings = ElectrumClientSettings { + client_name: format!("{} GUI/MM2 {}", gui, mm_version), + servers: servers.clone(), + coin_ticker, + spawn_ping: args.spawn_ping, + negotiate_version: args.negotiate_version, + min_connected, + max_connected, + }; - let client = ElectrumClientImpl::new( - ticker, + ElectrumClient::try_new( + client_settings, event_handlers, block_headers_storage, abortable_system, - args.negotiate_version, scripthash_notification_sender, - ); - for server in servers.iter() { - match client.add_server(server).await { - Ok(_) => (), - Err(e) => error!("Error {:?} connecting to {:?}. 
Address won't be used", e, server), - }; - } - - let mut attempts = 0i32; - while !client.is_connected().await { - if attempts >= 10 { - return MmError::err(UtxoCoinBuildError::FailedToConnectToElectrums { - electrum_servers: servers.clone(), - seconds: 5, - }); - } - - Timer::sleep(0.5).await; - attempts += 1; - } - - let client = Arc::new(client); - - let spawner = client.spawner(); - if args.negotiate_version { - let weak_client = Arc::downgrade(&client); - let client_name = format!("{} GUI/MM2 {}", ctx.gui().unwrap_or("UNKNOWN"), ctx.mm_version()); - spawn_electrum_version_loop(&spawner, weak_client, on_event_rx, client_name); - - wait_for_protocol_version_checked(&client) - .await - .map_to_mm(UtxoCoinBuildError::ElectrumProtocolVersionCheckError)?; - } - - if args.spawn_ping { - let weak_client = Arc::downgrade(&client); - spawn_electrum_ping_loop(&spawner, weak_client, servers); - } - - Ok(ElectrumClient(client)) + ) + .map_to_mm(UtxoCoinBuildError::Internal) } #[cfg(not(target_arch = "wasm32"))] @@ -869,140 +824,3 @@ fn read_native_mode_conf( ))); Ok((rpc_port, rpc_user.clone(), rpc_password.clone())) } - -/// Ping the electrum servers every 30 seconds to prevent them from disconnecting us. -/// According to docs server can do it if there are no messages in ~10 minutes. -/// https://electrumx.readthedocs.io/en/latest/protocol-methods.html?highlight=keep#server-ping -/// Weak reference will allow to stop the thread if client is dropped. -fn spawn_electrum_ping_loop( - spawner: &Spawner, - weak_client: Weak, - servers: Vec, -) { - let msg_on_stopped = format!("Electrum servers {servers:?} ping loop stopped"); - let fut = async move { - loop { - if let Some(client) = weak_client.upgrade() { - if let Err(e) = ElectrumClient(client).server_ping().compat().await { - error!("Electrum servers {:?} ping error: {}", servers, e); - } - } else { - break; - } - Timer::sleep(30.).await - } - }; - - let settings = AbortSettings::info_on_any_stop(msg_on_stopped); - spawner.spawn_with_settings(fut, settings); -} - -/// Follow the `on_connect_rx` stream and verify the protocol version of each connected electrum server. -/// https://electrumx.readthedocs.io/en/latest/protocol-methods.html?highlight=keep#server-version -/// Weak reference will allow to stop the thread if client is dropped. 
-fn spawn_electrum_version_loop( - spawner: &Spawner, - weak_client: Weak, - mut on_event_rx: UnboundedReceiver, - client_name: String, -) { - let fut = async move { - while let Some(event) = on_event_rx.next().await { - match event { - ElectrumProtoVerifierEvent::Connected(electrum_addr) => { - check_electrum_server_version(weak_client.clone(), client_name.clone(), electrum_addr).await - }, - ElectrumProtoVerifierEvent::Disconnected(electrum_addr) => { - if let Some(client) = weak_client.upgrade() { - client.reset_protocol_version(&electrum_addr).await.error_log(); - } - }, - } - } - }; - let settings = AbortSettings::info_on_any_stop("Electrum server.version loop stopped".to_string()); - spawner.spawn_with_settings(fut, settings); -} - -async fn check_electrum_server_version( - weak_client: Weak, - client_name: String, - electrum_addr: String, -) { - // client.remove_server() is called too often - async fn remove_server(client: ElectrumClient, electrum_addr: &str) { - if let Err(e) = client.remove_server(electrum_addr).await { - error!("Error on remove server: {}", e); - } - } - - if let Some(c) = weak_client.upgrade() { - let client = ElectrumClient(c); - let available_protocols = client.protocol_version(); - let version = match client - .server_version(&electrum_addr, &client_name, available_protocols) - .compat() - .await - { - Ok(version) => version, - Err(e) => { - error!("Electrum {} server.version error: {:?}", electrum_addr, e); - if !e.error.is_transport() { - remove_server(client, &electrum_addr).await; - }; - return; - }, - }; - - // check if the version is allowed - let actual_version = match version.protocol_version.parse::() { - Ok(v) => v, - Err(e) => { - error!("Error on parse protocol_version: {:?}", e); - remove_server(client, &electrum_addr).await; - return; - }, - }; - - if !available_protocols.contains(&actual_version) { - error!( - "Received unsupported protocol version {:?} from {:?}. Remove the connection", - actual_version, electrum_addr - ); - remove_server(client, &electrum_addr).await; - return; - } - - match client.set_protocol_version(&electrum_addr, actual_version).await { - Ok(()) => info!( - "Use protocol version {:?} for Electrum {:?}", - actual_version, electrum_addr - ), - Err(e) => error!("Error on set protocol_version: {}", e), - }; - } -} - -/// Wait until the protocol version of at least one client's Electrum is checked. 
-async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Result<(), String> { - repeatable!(async { - if client.count_connections().await == 0 { - // All of the connections were removed because of server.version checking - return Ready(ERR!( - "There are no Electrums with the required protocol version {:?}", - client.protocol_version() - )); - } - - if client.is_protocol_version_checked().await { - return Ready(Ok(())); - } - Retry(()) - }) - .repeat_every_secs(0.5) - .attempts(10) - .await - .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) - // Flatten `Result< Result<(), String>, String >` - .flatten() -} diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 175454e1da..91116e109a 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -4997,11 +4997,7 @@ where valid_addresses.insert(valid_address); } if let UtxoRpcClientEnum::Electrum(electrum_client) = &coin.as_ref().rpc_client { - if let Some(sender) = &electrum_client.scripthash_notification_sender { - sender - .unbounded_send(ScripthashNotification::SubscribeToAddresses(valid_addresses)) - .map_err(|e| ERRL!("Failed sending scripthash message. {}", e))?; - } + electrum_client.subscribe_addresses(valid_addresses)?; }; Ok(()) diff --git a/mm2src/coins/utxo/utxo_tests.rs b/mm2src/coins/utxo/utxo_tests.rs index 9456361c64..34b088ad5b 100644 --- a/mm2src/coins/utxo/utxo_tests.rs +++ b/mm2src/coins/utxo/utxo_tests.rs @@ -13,9 +13,9 @@ use crate::rpc_command::init_scan_for_new_addresses::{InitScanAddressesRpcOps, S use crate::utxo::qtum::{qtum_coin_with_priv_key, QtumCoin, QtumDelegationOps, QtumDelegationRequest}; #[cfg(not(target_arch = "wasm32"))] use crate::utxo::rpc_clients::{BlockHashOrHeight, NativeUnspent}; -use crate::utxo::rpc_clients::{ElectrumBalance, ElectrumClient, ElectrumClientImpl, GetAddressInfoRes, - ListSinceBlockRes, NativeClient, NativeClientImpl, NetworkInfo, UtxoRpcClientOps, - ValidateAddressRes, VerboseBlock}; +use crate::utxo::rpc_clients::{ElectrumBalance, ElectrumClient, ElectrumClientImpl, ElectrumClientSettings, + GetAddressInfoRes, ListSinceBlockRes, NativeClient, NativeClientImpl, NetworkInfo, + UtxoRpcClientOps, ValidateAddressRes, VerboseBlock}; use crate::utxo::spv::SimplePaymentVerification; #[cfg(not(target_arch = "wasm32"))] use crate::utxo::utxo_block_header_storage::{BlockHeaderStorage, SqliteBlockHeadersStorage}; @@ -85,7 +85,7 @@ pub fn electrum_client_for_test(servers: &[&str]) -> ElectrumClient { let servers = servers.into_iter().map(|s| json::from_value(s).unwrap()).collect(); let abortable_system = AbortableQueue::default(); - block_on(builder.electrum_client(abortable_system, args, servers, None)).unwrap() + block_on(builder.electrum_client(abortable_system, args, servers, (None, None), None)).unwrap() } /// Returned client won't work by default, requires some mocks to be usable @@ -468,15 +468,24 @@ fn test_wait_for_payment_spend_timeout_electrum() { }; let abortable_system = AbortableQueue::default(); - let client = ElectrumClientImpl::new( - TEST_COIN_NAME.into(), + let client_settings = ElectrumClientSettings { + client_name: "test".to_string(), + servers: vec![], + coin_ticker: TEST_COIN_NAME.into(), + spawn_ping: true, + negotiate_version: true, + min_connected: 1, + max_connected: 1, + }; + let client = ElectrumClient::try_new( + client_settings, Default::default(), block_headers_storage, abortable_system, - true, None, - ); - let client = 
UtxoRpcClientEnum::Electrum(ElectrumClient(Arc::new(client))); + ) + .expect("Expected electrum_client_impl constructed without a problem"); + let client = UtxoRpcClientEnum::Electrum(client); let coin = utxo_coin_for_test(client, None, false); let transaction = hex::decode("01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000") .unwrap(); @@ -1089,7 +1098,7 @@ fn test_electrum_rpc_client_error() { // use the static string instead because the actual error message cannot be obtain // by serde_json serialization - let expected = r#"JsonRpcError { client_info: "coin: DOC", request: JsonRpcRequest { jsonrpc: "2.0", id: "1", method: "blockchain.transaction.get", params: [String("0000000000000000000000000000000000000000000000000000000000000000"), Bool(true)] }, error: Response(electrum1.cipig.net:10060, Object({"code": Number(2), "message": String("daemon error: DaemonError({'code': -5, 'message': 'No such mempool or blockchain transaction. Use gettransaction for wallet transactions.'})")})) }"#; + let expected = r#"method: "blockchain.transaction.get", params: [String("0000000000000000000000000000000000000000000000000000000000000000"), Bool(true)] }, error: Response(electrum1.cipig.net:10060, Object({"code": Number(2), "message": String("daemon error: DaemonError({'code': -5, 'message': 'No such mempool or blockchain transaction. 
Use gettransaction for wallet transactions.'})")})) }"#; let actual = format!("{}", err); assert!(actual.contains(expected)); @@ -1533,33 +1542,44 @@ fn test_network_info_negative_time_offset() { #[test] fn test_unavailable_electrum_proto_version() { - ElectrumClientImpl::new.mock_safe( - |coin_ticker, event_handlers, block_headers_storage, abortable_system, _, _| { + ElectrumClientImpl::try_new_arc.mock_safe( + |client_settings, block_headers_storage, abortable_system, event_handlers, scripthash_notification_sender| { MockResult::Return(ElectrumClientImpl::with_protocol_version( - coin_ticker, - event_handlers, - OrdRange::new(1.8, 1.9).unwrap(), + client_settings, block_headers_storage, abortable_system, - None, + event_handlers, + scripthash_notification_sender, + OrdRange::new(1.8, 1.9).unwrap(), )) }, ); let conf = json!({"coin":"RICK","asset":"RICK","rpcport":8923}); + let servers = ["electrum1.cipig.net:10020"]; let req = json!({ "method": "electrum", - "servers": [{"url":"electrum1.cipig.net:10020"}], + "servers": servers.iter().map(|server| json!({"url": server})).collect::>(), }); let ctx = MmCtxBuilder::new().into_mm_arc(); let params = UtxoActivationParams::from_legacy_req(&req).unwrap(); let priv_key = Secp256k1Secret::from([1; 32]); - let error = block_on(utxo_standard_coin_with_priv_key(&ctx, "RICK", &conf, ¶ms, priv_key)) - .err() - .unwrap(); - log!("Error: {}", error); - assert!(error.contains("There are no Electrums with the required protocol version")); + let coin = block_on(utxo_standard_coin_with_priv_key(&ctx, "RICK", &conf, ¶ms, priv_key)).unwrap(); + // Wait a little bit to make sure the servers are removed due to version mismatch. + block_on(Timer::sleep(2.)); + if let UtxoRpcClientEnum::Electrum(ref electrum_client) = coin.as_ref().rpc_client { + for server in servers { + let error = block_on(electrum_client.get_block_count_from(server).compat()) + .err() + .unwrap() + .to_string(); + log!("{}", error); + assert!(error.contains("Unknown server address")); + } + } else { + panic!("Expected Electrum client"); + } } #[test] @@ -1602,18 +1622,29 @@ fn test_spam_rick() { #[test] fn test_one_unavailable_electrum_proto_version() { + // Patch the electurm client construct to require protocol version 1.4 only. + ElectrumClientImpl::try_new_arc.mock_safe( + |client_settings, block_headers_storage, abortable_system, event_handlers, scripthash_notification_sender| { + MockResult::Return(ElectrumClientImpl::with_protocol_version( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + OrdRange::new(1.4, 1.4).unwrap(), + )) + }, + ); // check if the electrum-mona.bitbank.cc:50001 doesn't support the protocol version 1.4 let client = electrum_client_for_test(&["electrum-mona.bitbank.cc:50001"]); - let result = block_on_f01(client.server_version( - "electrum-mona.bitbank.cc:50001", - "AtomicDEX", - &OrdRange::new(1.4, 1.4).unwrap(), - )); - assert!(result - .err() - .unwrap() - .to_string() - .contains("unsupported protocol version")); + // When an electrum server doesn't support our protocol version range, it gets removed by the client, + // wait a little bit to make sure this is the case. 
+ block_on(Timer::sleep(2.)); + let error = block_on_f01(client.get_block_count_from("electrum-mona.bitbank.cc:50001")) + .unwrap_err() + .to_string(); + log!("{}", error); + assert!(error.contains("Unknown server address")); drop(client); log!("Run BTC coin to test the server.version loop"); diff --git a/mm2src/coins/utxo/utxo_wasm_tests.rs b/mm2src/coins/utxo/utxo_wasm_tests.rs index a33e1ba039..bd059c8627 100644 --- a/mm2src/coins/utxo/utxo_wasm_tests.rs +++ b/mm2src/coins/utxo/utxo_wasm_tests.rs @@ -42,7 +42,7 @@ pub async fn electrum_client_for_test(servers: &[&str]) -> ElectrumClient { let servers = servers.into_iter().map(|s| json::from_value(s).unwrap()).collect(); let abortable_system = AbortableQueue::default(); builder - .electrum_client(abortable_system, args, servers, None) + .electrum_client(abortable_system, args, servers, (None, None), None) .await .unwrap() } diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 81d0aca85c..34d67bb60d 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -11,7 +11,7 @@ use crate::coin_errors::{MyAddressError, ValidatePaymentResult}; use crate::hd_wallet::HDPathAccountToAddressId; use crate::my_tx_history_v2::{MyTxHistoryErrorV2, MyTxHistoryRequestV2, MyTxHistoryResponseV2}; use crate::rpc_command::init_withdraw::{InitWithdrawCoin, WithdrawInProgressStatus, WithdrawTaskHandleShared}; -use crate::utxo::rpc_clients::{ElectrumRpcRequest, UnspentInfo, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, +use crate::utxo::rpc_clients::{ElectrumConnectionSettings, UnspentInfo, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, UtxoRpcResult}; use crate::utxo::utxo_builder::UtxoCoinBuildError; use crate::utxo::utxo_builder::{UtxoCoinBuilder, UtxoCoinBuilderCommonOps, UtxoFieldsWithGlobalHDBuilder, @@ -750,7 +750,12 @@ pub enum ZcoinRpcMode { #[serde(alias = "Electrum")] Light { #[serde(alias = "servers")] - electrum_servers: Vec, + /// The settings of each electrum server. + electrum_servers: Vec, + /// The minimum number of connections to electrum servers to keep alive/maintained at all times. + min_connected: Option, + /// The maximum number of connections to electrum servers to not exceed at any time. + max_connected: Option, light_wallet_d_servers: Vec, /// Specifies the parameters for synchronizing the wallet from a specific block. This overrides the /// `CheckPointBlockInfo` configuration in the coin settings. @@ -967,8 +972,15 @@ impl<'a> ZCoinBuilder<'a> { let utxo_mode = match &z_coin_params.mode { #[cfg(not(target_arch = "wasm32"))] ZcoinRpcMode::Native => UtxoRpcMode::Native, - ZcoinRpcMode::Light { electrum_servers, .. } => UtxoRpcMode::Electrum { + ZcoinRpcMode::Light { + electrum_servers, + min_connected, + max_connected, + .. 
+ } => UtxoRpcMode::Electrum { servers: electrum_servers.clone(), + min_connected: *min_connected, + max_connected: *max_connected, }, }; let utxo_params = UtxoActivationParams { diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs index f665a9a8e0..de201856d8 100644 --- a/mm2src/common/common.rs +++ b/mm2src/common/common.rs @@ -129,6 +129,7 @@ pub mod custom_futures; pub mod custom_iter; #[path = "executor/mod.rs"] pub mod executor; pub mod expirable_map; +pub mod notifier; pub mod number_type_casting; pub mod password_policy; pub mod seri; diff --git a/mm2src/common/executor/abortable_system/abortable_queue.rs b/mm2src/common/executor/abortable_system/abortable_queue.rs index 99ffc70ca3..89781bcfa4 100644 --- a/mm2src/common/executor/abortable_system/abortable_queue.rs +++ b/mm2src/common/executor/abortable_system/abortable_queue.rs @@ -40,9 +40,7 @@ impl From> for AbortableQueue { impl AbortableSystem for AbortableQueue { type Inner = QueueInnerState; - /// Aborts all spawned futures and initiates aborting of critical futures - /// after the specified [`AbortSettings::critical_timeout_s`]. - fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_handle(subsystem_abort_tx).map(|_| ()) @@ -98,12 +96,15 @@ impl WeakSpawner { match select(abortable_fut.boxed(), wait_till_abort.boxed()).await { // The future has finished normally. - Either::Left(_) => { + Either::Left((_, wait_till_abort_fut)) => { if let Some(on_finish) = settings.on_finish { log::log!(on_finish.level, "{}", on_finish.msg); } if let Some(queue_inner) = inner_weak.upgrade() { + // Drop the `wait_till_abort_fut` so to render the corresponding `abort_tx` sender canceled. + // This way we can query the `abort_tx` sender to check if it's canceled, thus safe to mark as finished. + drop(wait_till_abort_fut); queue_inner.lock().on_future_finished(future_id); } }, @@ -203,8 +204,18 @@ impl QueueInnerState { /// Releases the `finished_future_id` so it can be reused later on [`QueueInnerState::insert_handle`]. fn on_future_finished(&mut self, finished_future_id: FutureId) { - if let QueueInnerState::Ready { finished_futures, .. } = self { - finished_futures.push(finished_future_id); + if let QueueInnerState::Ready { + finished_futures, + abort_handlers, + } = self + { + // Only mark this ID as finished if a future existed for it and is canceled. We can get false + // `on_future_finished` signals from futures that aren't in the `abort_handlers` anymore (abortable queue was reset). 
+ if let Some(handle) = abort_handlers.get(finished_future_id) { + if handle.is_canceled() { + finished_futures.push(finished_future_id); + } + } } } @@ -234,6 +245,8 @@ impl SystemInner for QueueInnerState { *self = QueueInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, QueueInnerState::Aborted) } } #[cfg(test)] diff --git a/mm2src/common/executor/abortable_system/graceful_shutdown.rs b/mm2src/common/executor/abortable_system/graceful_shutdown.rs index 3feee076b2..6a902faab7 100644 --- a/mm2src/common/executor/abortable_system/graceful_shutdown.rs +++ b/mm2src/common/executor/abortable_system/graceful_shutdown.rs @@ -32,7 +32,7 @@ impl From> for GracefulShutdownRegistry { impl AbortableSystem for GracefulShutdownRegistry { type Inner = ShutdownInnerState; - fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_handle(subsystem_abort_tx) @@ -73,4 +73,6 @@ impl SystemInner for ShutdownInnerState { *self = ShutdownInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, ShutdownInnerState::Aborted) } } diff --git a/mm2src/common/executor/abortable_system/mod.rs b/mm2src/common/executor/abortable_system/mod.rs index b5399ad6dd..82ef564278 100644 --- a/mm2src/common/executor/abortable_system/mod.rs +++ b/mm2src/common/executor/abortable_system/mod.rs @@ -24,7 +24,23 @@ pub trait AbortableSystem: From> { /// Aborts all spawned futures and subsystems if they present. /// The abortable system is considered not to be - fn abort_all(&self) -> Result<(), AbortedError>; + fn abort_all(&self) -> Result<(), AbortedError> { self.__inner().lock().abort_all() } + + /// Aborts all the spawned futures & subsystems if present, and resets the system + /// to the initial state for further use. + fn abort_all_and_reset(&self) -> Result<(), AbortedError> { + let inner = self.__inner(); + let mut inner_locked = inner.lock(); + // Don't allow resetting the system state if the system is already aborted. If the system is + // aborted this is because its parent was aborted as well. Resetting it will leave the system + // dangling with no parent to abort it (could still be aborted manually of course). + if inner_locked.is_aborted() { + return Err(AbortedError); + } + let mut previous_inner = std::mem::take(&mut *inner_locked); + previous_inner.abort_all().ok(); + Ok(()) + } /// Creates a new subsystem `S` linked to `Self` the way that /// if `Self` is aborted, the futures spawned by the subsystem will be aborted as well. @@ -56,12 +72,17 @@ pub trait AbortableSystem: From> { Ok(S::from(inner_shared)) } + fn __inner(&self) -> InnerShared; + fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError>; } pub trait SystemInner: Default + Send + 'static { /// Aborts all spawned futures and subsystems if they present. fn abort_all(&mut self) -> Result<(), AbortedError>; + + /// Returns whether the system has already been aborted. 
+ fn is_aborted(&self) -> bool; } #[cfg(test)] diff --git a/mm2src/common/executor/abortable_system/simple_map.rs b/mm2src/common/executor/abortable_system/simple_map.rs index c7cd9fc6cd..d759d53a04 100644 --- a/mm2src/common/executor/abortable_system/simple_map.rs +++ b/mm2src/common/executor/abortable_system/simple_map.rs @@ -35,7 +35,7 @@ impl AbortableSimpleMap { impl AbortableSystem for AbortableSimpleMap { type Inner = SimpleMapInnerState; - fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_subsystem(subsystem_abort_tx) @@ -81,6 +81,8 @@ impl SystemInner for SimpleMapInnerState { *self = SimpleMapInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, SimpleMapInnerState::Aborted) } } impl SimpleMapInnerState { diff --git a/mm2src/common/expirable_map.rs b/mm2src/common/expirable_map.rs index 996e2edfae..0b3110c066 100644 --- a/mm2src/common/expirable_map.rs +++ b/mm2src/common/expirable_map.rs @@ -93,6 +93,12 @@ impl ExpirableMap { self.map.insert(k, entry).map(|v| v.value) } + /// Clears the map. + pub fn clear(&mut self) { + self.map.clear(); + self.expiries.clear(); + } + /// Removes expired entries from the map. /// /// Iterates through the `expiries` in order, removing entries that have expired. diff --git a/mm2src/common/jsonrpc_client.rs b/mm2src/common/jsonrpc_client.rs index 94a1ca809b..3f9e4cf6f6 100644 --- a/mm2src/common/jsonrpc_client.rs +++ b/mm2src/common/jsonrpc_client.rs @@ -2,7 +2,7 @@ use futures01::Future; use itertools::Itertools; use serde::de::DeserializeOwned; use serde_json::{self as json, Value as Json}; -use std::collections::{BTreeSet, HashMap}; +use std::collections::HashMap; use std::fmt; /// Macro generating functions for RPC requests. @@ -69,10 +69,10 @@ impl From for JsonRpcRemoteAddr { /// The identifier is designed to uniquely match outgoing requests and incoming responses. /// Even if the batch response is sorted in a different order, `BTreeSet` allows it to be matched to the request. -#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub enum JsonRpcId { - Single(String), - Batch(BTreeSet), + Single(u64), + Batch(u64), } /// Serializable RPC request that is either single or batch. @@ -114,19 +114,15 @@ impl fmt::Debug for JsonRpcRequestEnum { pub struct JsonRpcRequest { pub jsonrpc: String, #[serde(default)] - pub id: String, + pub id: u64, pub method: String, pub params: Vec, } impl JsonRpcRequest { - // Returns [`JsonRpcRequest::id`]. - #[inline] - pub fn get_id(&self) -> &str { &self.id } - /// Returns a `JsonRpcId` identifier of the request. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id.clone()) } + pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id) } } impl From for JsonRpcRequestEnum { @@ -140,7 +136,12 @@ pub struct JsonRpcBatchRequest(Vec); impl JsonRpcBatchRequest { /// Returns a `JsonRpcId` identifier of the request. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Batch(self.orig_sequence_ids().collect()) } + pub fn rpc_id(&self) -> JsonRpcId { + // This shouldn't be called on an empty batch, but let's + // simply set the batch ID to maximum if the batch is empty. 
+ let batch_id = self.0.iter().map(|res| res.id).max().unwrap_or(u64::MAX); + JsonRpcId::Batch(batch_id) + } /// Returns the number of the requests in the batch. #[inline] @@ -153,7 +154,7 @@ impl JsonRpcBatchRequest { /// Returns original sequence of identifiers. /// The method is used to process batch responses in the same order in which the requests were sent. #[inline] - fn orig_sequence_ids(&self) -> impl Iterator + '_ { self.0.iter().map(|req| req.id.clone()) } + fn orig_sequence_ids(&self) -> impl Iterator + '_ { self.0.iter().map(|req| req.id) } } impl From for JsonRpcRequestEnum { @@ -185,7 +186,7 @@ pub struct JsonRpcResponse { #[serde(default)] pub jsonrpc: String, #[serde(default)] - pub id: String, + pub id: u64, #[serde(default)] pub result: Json, #[serde(default)] @@ -195,7 +196,7 @@ pub struct JsonRpcResponse { impl JsonRpcResponse { /// Returns a `JsonRpcId` identifier of the response. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id.clone()) } + pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id) } } /// Deserializable RPC batch response. @@ -204,7 +205,12 @@ pub struct JsonRpcBatchResponse(Vec); impl JsonRpcBatchResponse { /// Returns a `JsonRpcId` identifier of the response. - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Batch(self.0.iter().map(|res| res.id.clone()).collect()) } + pub fn rpc_id(&self) -> JsonRpcId { + // This shouldn't be called on an empty batch, but let's + // simply set the batch ID to maximum if the batch is empty. + let batch_id = self.0.iter().map(|res| res.id).max().unwrap_or(u64::MAX); + JsonRpcId::Batch(batch_id) + } /// Returns the number of the requests in the batch. #[inline] @@ -272,8 +278,8 @@ pub trait JsonRpcClient { /// Returns a stringified version of the JSON-RPC protocol. fn version(&self) -> &'static str; - /// Returns a stringified identifier of the next request. - fn next_id(&self) -> String; + /// Returns a unique identifier for the next request. + fn next_id(&self) -> u64; /// Get info that is used in particular to supplement the error info fn client_info(&self) -> String; @@ -395,8 +401,7 @@ fn process_transport_batch_result( }; // Turn the vector of responses into a hashmap by their IDs to get quick access to the content of the responses. - let mut response_map: HashMap = - batch.into_iter().map(|res| (res.id.clone(), res)).collect(); + let mut response_map: HashMap<_, _> = batch.into_iter().map(|res| (res.id, res)).collect(); if response_map.len() != orig_ids.len() { return Err(JsonRpcErrorType::Parse( remote_addr, diff --git a/mm2src/common/notifier.rs b/mm2src/common/notifier.rs new file mode 100644 index 0000000000..a82253b537 --- /dev/null +++ b/mm2src/common/notifier.rs @@ -0,0 +1,53 @@ +//! A simple notification system based on mpsc channels. +//! +//! Since this is based on mpsc, multiple notifiers (senders) are allowed while only a single +//! notifiee (receiver) listens for notifications. +//! +//! NOTE: This implementation memory leaks (in contrast to tokio's, but not used here to avoid tokio dependency on wasm). +//! This is because with each `clone()` of the sender we have a new slot in the channel (this is how `futures-rs` does mpsc). +//! These are removed when the receiver calls `wait()`, which calls `clear()`. But if the receiver never `wait()`s for any reason, +//! and there is a thread that doesn't stop `notify()`ing, the channel will keep growing unbounded. +//! +//! 
+//! So one must make sure that either `wait()` is called after some time or the receiver is dropped when it's no longer needed.
+use futures::{channel::mpsc, StreamExt};
+
+#[derive(Clone, Debug)]
+pub struct Notifier(mpsc::Sender<()>);
+
+#[derive(Debug)]
+pub struct Notifiee(mpsc::Receiver<()>);
+
+impl Notifier {
+    /// Create a new notifier and notifiee pair.
+    pub fn new() -> (Notifier, Notifiee) {
+        let (sender, receiver) = mpsc::channel(0);
+        (Notifier(sender), Notifiee(receiver))
+    }
+
+    /// Notify the receiver.
+    ///
+    /// This will error if the receiver has been dropped (disconnected).
+    pub fn notify(&self) -> Result<(), &'static str> {
+        if let Err(e) = self.0.clone().try_send(()) {
+            if e.is_disconnected() {
+                return Err("Notification receiver has been dropped.");
+            }
+        }
+        Ok(())
+    }
+}
+
+impl Notifiee {
+    /// Wait for a notification from any notifier.
+    ///
+    /// This will error if all notifiers have been dropped (disconnected).
+    pub async fn wait(&mut self) -> Result<(), &'static str> {
+        let result = self.0.next().await.ok_or("All notifiers have been dropped.");
+        // Clear pending notifications if there are any, since we have already been notified.
+        self.clear();
+        result
+    }
+
+    /// Clears the pending notifications if there are any.
+    fn clear(&mut self) { while let Ok(Some(_)) = self.0.try_next() {} }
+}
diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs
index dc9b85082b..f4633b1e92 100644
--- a/mm2src/mm2_main/src/lp_ordermatch.rs
+++ b/mm2src/mm2_main/src/lp_ordermatch.rs
@@ -5167,6 +5167,7 @@ pub struct CancelOrderResponse {
     result: String,
 }
 
+// TODO: This is a near copy of the function below, `cancel_order_rpc`.
 pub async fn cancel_order(ctx: MmArc, req: CancelOrderReq) -> Result> {
     let ordermatch_ctx = match OrdermatchContext::from_ctx(&ctx) {
         Ok(x) => x,
diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs
index c7665da02e..caa5417c5d 100644
--- a/mm2src/mm2_main/src/lp_swap.rs
+++ b/mm2src/mm2_main/src/lp_swap.rs
@@ -1839,7 +1839,7 @@ mod lp_swap_tests {
     use super::*;
     use crate::lp_native_dex::{fix_directories, init_p2p};
     use coins::hd_wallet::HDPathAccountToAddressId;
-    use coins::utxo::rpc_clients::ElectrumRpcRequest;
+    use coins::utxo::rpc_clients::ElectrumConnectionSettings;
     use coins::utxo::utxo_standard::utxo_standard_coin_with_priv_key;
     use coins::utxo::{UtxoActivationParams, UtxoRpcMode};
     use coins::MarketCoinOps;
@@ -2221,12 +2221,15 @@ mod lp_swap_tests {
             mode: UtxoRpcMode::Electrum {
                 servers: electrums
                     .iter()
-                    .map(|url| ElectrumRpcRequest {
+                    .map(|url| ElectrumConnectionSettings {
                         url: url.to_string(),
                         protocol: Default::default(),
                         disable_cert_verification: false,
+                        timeout_sec: None,
                     })
                     .collect(),
+                min_connected: None,
+                max_connected: None,
             },
             utxo_merge_params: None,
             tx_history: false,
diff --git a/mm2src/mm2_main/src/rpc.rs b/mm2src/mm2_main/src/rpc.rs
index 1bef856e15..85b61db612 100644
--- a/mm2src/mm2_main/src/rpc.rs
+++ b/mm2src/mm2_main/src/rpc.rs
@@ -37,6 +37,7 @@ use std::net::SocketAddr;
 
 cfg_native! {
     use hyper::{self, Body, Server};
+    use futures::channel::oneshot;
     use mm2_net::sse_handler::{handle_sse, SSE_ENDPOINT};
 }
 
@@ -333,6 +334,34 @@ pub extern "C" fn spawn_rpc(ctx_h: u32) {
         Ok((cert_chain, privkey))
     }
 
+    // Handles incoming HTTP requests.
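+    // The work is detached onto a separate task and the result is piped back over a `oneshot` channel,
+    // so a client disconnect can only cancel the `rx.await` below, never the RPC handling itself.
+    // A rough sketch of the shape (`do_work` stands in for `rpc_service`/`handle_sse`):
+    //
+    //     let (tx, rx) = oneshot::channel();
+    //     common::executor::spawn(async move { tx.send(do_work(req).await).ok(); });
+    //     rx.await // may be dropped early; the spawned work keeps running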
+    async fn handle_request(
+        req: Request<Body>,
+        remote_addr: SocketAddr,
+        ctx_h: u32,
+        is_event_stream_enabled: bool,
+    ) -> Result<Response<Body>, Infallible> {
+        let (tx, rx) = oneshot::channel();
+        // We execute the request in a separate task to avoid it being left uncompleted if the client disconnects.
+        // So whatever is inside the spawn here will run to completion (or panic).
+        common::executor::spawn(async move {
+            if is_event_stream_enabled && req.uri().path() == SSE_ENDPOINT {
+                tx.send(handle_sse(req, ctx_h).await).ok();
+            } else {
+                tx.send(rpc_service(req, ctx_h, remote_addr).await).ok();
+            }
+        });
+        // On the other hand, this `.await` might be aborted if the client disconnects.
+        match rx.await {
+            Ok(res) => Ok(res),
+            Err(_) => {
+                let err = "The RPC service aborted without responding.";
+                error!("{}", err);
+                Ok(Response::builder().status(500).body(Body::from(err)).unwrap())
+            },
+        }
+    }
+
     // NB: We need to manually handle the incoming connections in order to get the remote IP address,
     // cf. https://github.com/hyperium/hyper/issues/1410#issuecomment-419510220.
     // Although if the ability to access the remote IP address is solved by the Hyper in the future
@@ -340,28 +369,19 @@ pub extern "C" fn spawn_rpc(ctx_h: u32) {
     // cf. https://github.com/hyperium/hyper/pull/1640.
     let ctx = MmArc::from_ffi_handle(ctx_h).expect("No context");
 
-    let is_event_stream_enabled = ctx.event_stream_configuration.is_some();
-    let make_svc_fut = move |remote_addr: SocketAddr| async move {
-        Ok::<_, Infallible>(service_fn(move |req: Request<Body>| async move {
-            if is_event_stream_enabled && req.uri().path() == SSE_ENDPOINT {
-                let res = handle_sse(req, ctx_h).await?;
-                return Ok::<_, Infallible>(res);
-            }
-
-            let res = rpc_service(req, ctx_h, remote_addr).await;
-            Ok::<_, Infallible>(res)
-        }))
-    };
-
     //The `make_svc` macro creates a `make_service_fn` for a specified socket type.
     // `$socket_type`: The socket type with a `remote_addr` method that returns a `SocketAddr`.
     macro_rules! make_svc {
         ($socket_type:ty) => {
             make_service_fn(move |socket: &$socket_type| {
                 let remote_addr = socket.remote_addr();
-                make_svc_fut(remote_addr)
+                async move {
+                    Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
+                        handle_request(req, remote_addr, ctx_h, is_event_stream_enabled)
+                    }))
+                }
             })
         };
     }
diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs
index 5ef386942c..a43d6cf127 100644
--- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs
+++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs
@@ -20,6 +20,7 @@
 //
 use coins::{lp_coinfind, lp_coinfind_any, lp_coininit, CoinsContext, MmCoinEnum};
+use common::custom_futures::timeout::FutureTimerExt;
 use common::executor::Timer;
 use common::{rpc_err_response, rpc_response, HyRes};
 use futures::compat::Future01CompatExt;
@@ -138,7 +139,16 @@ pub async fn disable_coin(ctx: MmArc, req: Json) -> Result<Response<Vec<u8>>, St
 pub async fn electrum(ctx: MmArc, req: Json) -> Result<Response<Vec<u8>>, String> {
     let ticker = try_s!(req["coin"].as_str().ok_or("No 'coin' field")).to_owned();
     let coin: MmCoinEnum = try_s!(lp_coininit(&ctx, &ticker, &req).await);
-    let balance = try_s!(coin.my_balance().compat().await);
+    let balance = match coin.my_balance().compat().timeout_secs(5.).await {
+        Ok(Ok(balance)) => balance,
+        // If the coin was activated successfully but the balance query failed (most probably due to faulty
+        // electrum servers), remove the coin as the whole request is a failure now from the POV of the GUI.
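+        // Note that `err` here covers both an RPC failure (`Ok(Err(_))`) and the 5-second timeout
+        // elapsing (`Err(_)` from `timeout_secs`), so a hanging electrum server is handled the same way.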
+ err => { + let coins_ctx = try_s!(CoinsContext::from_ctx(&ctx)); + coins_ctx.remove_coin(coin).await; + return Err(ERRL!("Deactivated coin due to error in balance querying: {:?}", err)); + }, + }; let res = CoinInitResponse { result: "success".into(), address: try_s!(coin.my_address()), diff --git a/mm2src/mm2_net/src/sse_handler.rs b/mm2src/mm2_net/src/sse_handler.rs index 3b3afeee58..568bfc98c0 100644 --- a/mm2src/mm2_net/src/sse_handler.rs +++ b/mm2src/mm2_net/src/sse_handler.rs @@ -1,12 +1,11 @@ use hyper::{body::Bytes, Body, Request, Response}; use mm2_core::mm_ctx::MmArc; use serde_json::json; -use std::convert::Infallible; pub const SSE_ENDPOINT: &str = "/event-stream"; /// Handles broadcasted messages from `mm2_event_stream` continuously. -pub async fn handle_sse(request: Request, ctx_h: u32) -> Result, Infallible> { +pub async fn handle_sse(request: Request, ctx_h: u32) -> Response { // This is only called once for per client on the initialization, // meaning this is not a resource intensive computation. let ctx = match MmArc::from_ffi_handle(ctx_h) { @@ -62,17 +61,15 @@ pub async fn handle_sse(request: Request, ctx_h: u32) -> Result Ok(res), + Ok(res) => res, Err(err) => handle_internal_error(err.to_string()).await, } } /// Fallback function for handling errors in SSE connections -async fn handle_internal_error(message: String) -> Result, Infallible> { - let response = Response::builder() +async fn handle_internal_error(message: String) -> Response { + Response::builder() .status(500) .body(Body::from(message)) - .expect("Returning 500 should never fail."); - - Ok(response) + .expect("Returning 500 should never fail.") } diff --git a/mm2src/mm2_net/src/wasm/wasm_ws.rs b/mm2src/mm2_net/src/wasm/wasm_ws.rs index 7464dcf142..1d19d43a60 100644 --- a/mm2src/mm2_net/src/wasm/wasm_ws.rs +++ b/mm2src/mm2_net/src/wasm/wasm_ws.rs @@ -22,7 +22,7 @@ const NORMAL_CLOSURE_CODE: u16 = 1000; pub type ConnIdx = usize; -pub type WsOutgoingReceiver = mpsc::Receiver; +pub type WsOutgoingReceiver = mpsc::Receiver>; pub type WsIncomingSender = mpsc::Sender<(ConnIdx, WebSocketEvent)>; type WsTransportReceiver = mpsc::Receiver; @@ -69,14 +69,14 @@ impl InitWsError { } } -/// The `WsEventReceiver` wrapper that filters and maps the incoming `WebSocketEvent` events into `Result`. +/// The `WsEventReceiver` wrapper that filters and maps the incoming `WebSocketEvent` events into `Result, WebSocketError>`. pub struct WsIncomingReceiver { inner: WsEventReceiver, closed: bool, } impl Stream for WsIncomingReceiver { - type Item = Result; + type Item = Result, WebSocketError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.closed { @@ -122,7 +122,7 @@ impl Stream for WsEventReceiver { #[derive(Debug, Clone)] pub struct WsOutgoingSender { - inner: mpsc::Sender, + inner: mpsc::Sender>, /// Is used to determine when all senders are dropped. #[allow(dead_code)] shutdown_tx: OutgoingShutdownTx, @@ -132,9 +132,9 @@ pub struct WsOutgoingSender { /// Please note `WsOutgoingSender` must not provide a way to close the [`WsOutgoingSender::inner`] channel, /// because the shutdown_tx wouldn't be closed properly. 
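///
/// A minimal usage sketch (hypothetical caller code; the sender comes from `spawn_ws_transport` below,
/// and the payload is whatever bytes the peer expects, e.g. a serialized JSON-RPC request):
///
/// ```ignore
/// let request = json!({ "method": "server.version", "params": ["1.2", "1.4"] });
/// let payload = serde_json::to_vec(&request).expect("serializing to Vec<u8> shouldn't fail");
/// outgoing_tx.send(payload).await.expect("the transport task was dropped");
/// ```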
impl WsOutgoingSender { - pub async fn send(&mut self, msg: Json) -> Result<(), SendError> { self.inner.send(msg).await } + pub async fn send(&mut self, msg: Vec) -> Result<(), SendError> { self.inner.send(msg).await } - pub fn try_send(&mut self, msg: Json) -> Result<(), TrySendError> { self.inner.try_send(msg) } + pub fn try_send(&mut self, msg: Vec) -> Result<(), TrySendError>> { self.inner.try_send(msg) } } #[derive(Debug)] @@ -147,12 +147,12 @@ pub enum WebSocketEvent { /// Please note some of the errors lead to the connection close. Error(WebSocketError), /// A message has been received through a WebSocket connection. - Incoming(Json), + Incoming(Vec), } #[derive(Debug)] pub enum WebSocketError { - OutgoingError { reason: OutgoingError, outgoing: Json }, + OutgoingError { reason: OutgoingError, outgoing: Vec }, InvalidIncoming { description: String }, } @@ -212,6 +212,7 @@ fn spawn_ws_transport( ) -> InitWsResult<(WsOutgoingSender, WsEventReceiver)> { let (ws, ws_transport_rx) = WebSocketImpl::init(url)?; let (incoming_tx, incoming_rx, incoming_shutdown) = incoming_channel(1024); + let (outgoing_tx, outgoing_rx, outgoing_shutdown) = outgoing_channel(1024); let user_shutdown = into_one_shutdown(incoming_shutdown, outgoing_shutdown); @@ -353,17 +354,11 @@ impl WebSocketImpl { Ok((WebSocketImpl { ws, closures }, rx)) } - fn send_to_ws(&self, outgoing: Json) -> Result<(), WebSocketError> { - match json::to_string(&outgoing) { - Ok(req) => self.ws.send_with_str(&req).map_err(|error| { - let reason = OutgoingError::UnderlyingError(stringify_js_error(&error)); - WebSocketError::OutgoingError { reason, outgoing } - }), - Err(e) => { - let reason = OutgoingError::SerializingError(e.to_string()); - Err(WebSocketError::OutgoingError { reason, outgoing }) - }, - } + fn send_to_ws(&self, outgoing: Vec) -> Result<(), WebSocketError> { + self.ws.send_with_u8_array(&outgoing).map_err(|error| { + let reason = OutgoingError::UnderlyingError(stringify_js_error(&error)); + WebSocketError::OutgoingError { reason, outgoing } + }) } fn validate_websocket_url(url: &str) -> Result<(), MmError> { @@ -423,7 +418,7 @@ impl WsStateMachine { } } - fn send_unexpected_outgoing_back(&mut self, outgoing: Json, current_state: &str) { + fn send_unexpected_outgoing_back(&mut self, outgoing: Vec, current_state: &str) { error!( "Unexpected outgoing message while the socket idx={} state is {}", self.idx, current_state @@ -478,7 +473,7 @@ enum StateEvent { /// All instances of `WsOutgoingSender` and `WsIncomingReceiver` were dropped. UserSideClosed, /// Received an outgoing message. It should be forwarded to `WebSocket`. - OutgoingMessage(Json), + OutgoingMessage(Vec), /// Received a `WsTransportEvent` event. It might be an incoming message from `WebSocket` or something else. 
WsTransportEvent(WsTransportEvent), } @@ -491,7 +486,7 @@ enum WsTransportEvent { code: u16, }, Error(WsTransportError), - Incoming(Json), + Incoming(Vec), } #[derive(Debug)] @@ -565,8 +560,8 @@ impl State for ConnectingState { } }, StateEvent::WsTransportEvent(WsTransportEvent::Incoming(incoming)) => error!( - "Unexpected incoming message {} while the socket idx={} state is ConnectingState", - ctx.idx, incoming + "Unexpected incoming message {:?} while the socket idx={} state is ConnectingState", + incoming, ctx.idx ), } } @@ -647,11 +642,11 @@ impl ClosedState { } } -fn decode_incoming(incoming: MessageEvent) -> Result { +fn decode_incoming(incoming: MessageEvent) -> Result, String> { match incoming.data().dyn_into::() { Ok(txt) => { let txt = String::from(txt); - json::from_str(&txt).map_err(|e| format!("Error deserializing an incoming payload: {}", e)) + Ok(txt.into_bytes()) }, Err(e) => Err(format!("Unknown MessageEvent {:?}", e)), } @@ -724,10 +719,12 @@ mod tests { "method": "server.version", "params": ["1.2", "1.4"], }); + let get_version = json::to_vec(&get_version).expect("Vec serialization won't fail"); outgoing_tx.send(get_version).await.expect("!outgoing_tx.send"); match incoming_rx.next().timeout_secs(5.).await.unwrap_w() { Some((_conn_idx, WebSocketEvent::Incoming(response))) => { + let response: Json = json::from_slice(&response).expect("Failed to parse incoming message"); debug!("Response: {:?}", response); assert!(response.get("result").is_some()); },
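+            // The transport now carries raw bytes end to end: requests are serialized with `json::to_vec`
+            // before sending, and incoming payloads are parsed back with `json::from_slice` on receipt.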