From eaf45dfa3542340fe52a9796108513637e31e521 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E7=82=8E=E6=B3=BC?=
Date: Sat, 1 Apr 2023 09:35:03 +0800
Subject: [PATCH] Change: move `RaftStateMachine` out of `RaftStorage`

In Raft, the state machine is an independent storage component that operates
separately from the log store. As a result, accesses to the log store and to
the state machine can naturally be parallelized.

This commit replaces the single storage type parameter `S: RaftStorage` in
`Raft<.., S: RaftStorage>` with two type parameters: `RaftLogStorage` and
`RaftStateMachine`.

- Add: `RaftLogReaderExt` to provide additional log access methods built on a
  `RaftLogReader` implementation. Some of these methods are moved from
  `StorageHelper` to this trait.

- Add: `Adaptor` to let an application use the separated log-store and
  state-machine storage API without rewriting its `RaftStorage` implementation.

- Refactor: shorten type names in the two example crates.

### TODO

- [ ] Callback-based log append is defined but not yet used.

### Upgrade tip

Use an `Adaptor` to wrap an existing `RaftStorage` implementation:

```rust
// Before:
let store = MyRaftStorage::new();
Raft::new(..., store);

// After:
let store = MyRaftStorage::new();
let (log_store, sm) = Adaptor::new(store);
Raft::new(..., log_store, sm);
```
---
 examples/raft-kv-memstore/src/app.rs | 18 +-
 examples/raft-kv-memstore/src/bin/main.rs | 6 -
 examples/raft-kv-memstore/src/client.rs | 34 +-
 examples/raft-kv-memstore/src/lib.rs | 56 +--
 examples/raft-kv-memstore/src/network/api.rs | 14 +-
 .../src/network/management.rs | 23 +-
 examples/raft-kv-memstore/src/network/mod.rs | 5 +-
 examples/raft-kv-memstore/src/network/raft.rs | 20 +-
 .../src/network/raft_network_impl.rs | 64 ++-
 examples/raft-kv-memstore/src/store/mod.rs | 90 ++--
 .../tests/cluster/test_cluster.rs | 8 +-
 examples/raft-kv-rocksdb/src/app.rs | 10 +-
 examples/raft-kv-rocksdb/src/bin/main.rs | 6 -
 examples/raft-kv-rocksdb/src/client.rs | 66 +--
 examples/raft-kv-rocksdb/src/lib.rs | 42 +-
 examples/raft-kv-rocksdb/src/network.rs | 5 +-
 examples/raft-kv-rocksdb/src/network/api.rs | 14 +-
 .../raft-kv-rocksdb/src/network/management.rs | 24 +-
 examples/raft-kv-rocksdb/src/network/raft.rs | 18 +-
 .../src/network/raft_network_impl.rs | 49 +-
 examples/raft-kv-rocksdb/src/store.rs | 132 +++---
 .../tests/cluster/test_cluster.rs | 16 +-
 memstore/src/test.rs | 10 +-
 openraft/src/core/raft_core.rs | 154 +++++--
 openraft/src/core/tick.rs | 14 +-
 openraft/src/engine/log_id_list.rs | 9 +-
 openraft/src/raft.rs | 78 +++-
 openraft/src/replication/mod.rs | 233 +++++-----
 openraft/src/storage/adapter.rs | 207 +++++++++
 openraft/src/storage/callback.rs | 92 ++++
 openraft/src/storage/helper.rs | 86 ++--
 openraft/src/storage/log_store_ext.rs | 60 +++
 openraft/src/storage/mod.rs | 17 +-
 openraft/src/storage/v2.rs | 123 +++++
 openraft/src/testing/mod.rs | 27 ++
 openraft/src/testing/store_builder.rs | 20 +-
 openraft/src/testing/suite.rs | 430 +++++++++++-------
 rocksstore-compat07/src/test.rs | 11 +-
 rocksstore/src/test.rs | 13 +-
 sledstore/src/test.rs | 13 +-
 .../append_entries/t20_append_conflicts.rs | 12 +-
 .../t30_append_inconsistent_log.rs | 23 +-
 .../t40_append_updates_membership.rs | 2 +-
 .../t50_append_entries_with_bigger_term.rs | 3 +-
 .../tests/elect/t10_elect_compare_last_log.rs | 15 +-
 tests/tests/fixtures/mod.rs | 239 +++++-----
 tests/tests/life_cycle/t20_initialization.rs | 10 +-
 tests/tests/life_cycle/t20_shutdown.rs | 2 +-
 ...t30_follower_restart_does_not_interrupt.rs | 10 +-
 .../life_cycle/t30_single_follower_restart.rs | 6 +-
.../t90_issue_607_single_restart.rs | 4 +- tests/tests/log_compaction/t10_compaction.rs | 14 +- tests/tests/membership/t00_learner_restart.rs | 6 +- tests/tests/membership/t10_add_learner.rs | 9 +- .../tests/membership/t20_change_membership.rs | 6 +- ...9_new_leader_auto_commit_uniform_config.rs | 8 +- .../t20_metrics_state_machine_consistency.rs | 4 +- .../tests/snapshot/t23_snapshot_chunk_size.rs | 6 +- .../snapshot/t40_purge_in_snapshot_logs.rs | 6 +- .../t41_snapshot_overrides_membership.rs | 6 +- .../t42_snapshot_uses_prev_snap_membership.rs | 11 +- .../t43_snapshot_delete_conflict_logs.rs | 22 +- .../t44_replication_does_not_block_purge.rs | 2 +- .../state_machine/t10_total_order_apply.rs | 6 +- .../t20_state_machine_apply_membership.rs | 10 +- 65 files changed, 1686 insertions(+), 1073 deletions(-) create mode 100644 openraft/src/storage/adapter.rs create mode 100644 openraft/src/storage/callback.rs create mode 100644 openraft/src/storage/log_store_ext.rs create mode 100644 openraft/src/storage/v2.rs diff --git a/examples/raft-kv-memstore/src/app.rs b/examples/raft-kv-memstore/src/app.rs index 96a5be37e..87db9887d 100644 --- a/examples/raft-kv-memstore/src/app.rs +++ b/examples/raft-kv-memstore/src/app.rs @@ -1,17 +1,15 @@ use std::sync::Arc; -use openraft::Config; - -use crate::ExampleNodeId; -use crate::ExampleRaft; -use crate::ExampleStore; +use crate::NodeId; +use crate::Raft; +use crate::Store; // Representation of an application state. This struct can be shared around to share // instances of raft, store and more. -pub struct ExampleApp { - pub id: ExampleNodeId, +pub struct App { + pub id: NodeId, pub addr: String, - pub raft: ExampleRaft, - pub store: Arc, - pub config: Arc, + pub raft: Raft, + pub store: Arc, + pub config: Arc, } diff --git a/examples/raft-kv-memstore/src/bin/main.rs b/examples/raft-kv-memstore/src/bin/main.rs index 95fe77811..7e6ce68f1 100644 --- a/examples/raft-kv-memstore/src/bin/main.rs +++ b/examples/raft-kv-memstore/src/bin/main.rs @@ -1,13 +1,7 @@ use clap::Parser; -use openraft::Raft; -use raft_kv_memstore::network::raft_network_impl::ExampleNetwork; use raft_kv_memstore::start_example_raft_node; -use raft_kv_memstore::store::ExampleStore; -use raft_kv_memstore::ExampleTypeConfig; use tracing_subscriber::EnvFilter; -pub type ExampleRaft = Raft; - #[derive(Parser, Clone, Debug)] #[clap(author, version, about, long_about = None)] pub struct Opt { diff --git a/examples/raft-kv-memstore/src/client.rs b/examples/raft-kv-memstore/src/client.rs index ec3e1040d..99c57e530 100644 --- a/examples/raft-kv-memstore/src/client.rs +++ b/examples/raft-kv-memstore/src/client.rs @@ -5,20 +5,18 @@ use std::time::Duration; use openraft::error::ForwardToLeader; use openraft::error::NetworkError; -use openraft::error::RPCError; use openraft::error::RemoteError; use openraft::BasicNode; use openraft::RaftMetrics; use openraft::TryAsRef; -use reqwest::Client; use serde::de::DeserializeOwned; use serde::Deserialize; use serde::Serialize; use tokio::time::timeout; use crate::typ; -use crate::ExampleNodeId; -use crate::ExampleRequest; +use crate::NodeId; +use crate::Request; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Empty {} @@ -27,14 +25,14 @@ pub struct ExampleClient { /// The leader node to send request to. /// /// All traffic should be sent to the leader in a cluster. 
- pub leader: Arc>, + pub leader: Arc>, - pub inner: Client, + pub inner: reqwest::Client, } impl ExampleClient { /// Create a client with a leader node id and a node manager to get node address by node id. - pub fn new(leader_id: ExampleNodeId, leader_addr: String) -> Self { + pub fn new(leader_id: NodeId, leader_addr: String) -> Self { Self { leader: Arc::new(Mutex::new((leader_id, leader_addr))), inner: reqwest::Client::new(), @@ -49,10 +47,7 @@ impl ExampleClient { /// will be applied to state machine. /// /// The result of applying the request will be returned. - pub async fn write( - &self, - req: &ExampleRequest, - ) -> Result> { + pub async fn write(&self, req: &Request) -> Result> { self.send_rpc_to_leader("write", Some(req)).await } @@ -87,7 +82,7 @@ impl ExampleClient { /// The node to add has to exist, i.e., being added with `write(ExampleRequest::AddNode{})` pub async fn add_learner( &self, - req: (ExampleNodeId, String), + req: (NodeId, String), ) -> Result> { self.send_rpc_to_leader("add-learner", Some(&req)).await } @@ -98,7 +93,7 @@ impl ExampleClient { /// or an error [`LearnerNotFound`] will be returned. pub async fn change_membership( &self, - req: &BTreeSet, + req: &BTreeSet, ) -> Result> { self.send_rpc_to_leader("change-membership", Some(req)).await } @@ -108,7 +103,7 @@ impl ExampleClient { /// Metrics contains various information about the cluster, such as current leader, /// membership config, replication status etc. /// See [`RaftMetrics`]. - pub async fn metrics(&self) -> Result, typ::RPCError> { + pub async fn metrics(&self) -> Result, typ::RPCError> { self.do_send_rpc_to_leader("metrics", None::<&()>).await } @@ -118,7 +113,8 @@ impl ExampleClient { /// /// It sends out a POST request if `req` is Some. Otherwise a GET request. /// The remote endpoint must respond a reply in form of `Result`. - /// An `Err` happened on remote will be wrapped in an [`RPCError::RemoteError`]. + /// An `Err` happened on remote will be wrapped in an + /// [`openraft::error::RPCError::RemoteError`]. async fn do_send_rpc_to_leader( &self, uri: &str, @@ -150,22 +146,22 @@ impl ExampleClient { let res = timeout(Duration::from_millis(3_000), fu).await; let resp = match res { - Ok(x) => x.map_err(|e| RPCError::Network(NetworkError::new(&e)))?, + Ok(x) => x.map_err(|e| typ::RPCError::Network(NetworkError::new(&e)))?, Err(timeout_err) => { tracing::error!("timeout {} to url: {}", timeout_err, url); - return Err(RPCError::Network(NetworkError::new(&timeout_err))); + return Err(typ::RPCError::Network(NetworkError::new(&timeout_err))); } }; let res: Result> = - resp.json().await.map_err(|e| RPCError::Network(NetworkError::new(&e)))?; + resp.json().await.map_err(|e| typ::RPCError::Network(NetworkError::new(&e)))?; tracing::debug!( "<<< client recv reply from {}: {}", url, serde_json::to_string_pretty(&res).unwrap() ); - res.map_err(|e| RPCError::RemoteError(RemoteError::new(leader_id, e))) + res.map_err(|e| typ::RPCError::RemoteError(RemoteError::new(leader_id, e))) } /// Try the best to send a request to the leader. 
diff --git a/examples/raft-kv-memstore/src/lib.rs b/examples/raft-kv-memstore/src/lib.rs index ba753caa9..177143956 100644 --- a/examples/raft-kv-memstore/src/lib.rs +++ b/examples/raft-kv-memstore/src/lib.rs @@ -6,54 +6,54 @@ use std::sync::Arc; use actix_web::middleware; use actix_web::middleware::Logger; use actix_web::web::Data; -use actix_web::App; use actix_web::HttpServer; +use openraft::storage::Adaptor; use openraft::BasicNode; use openraft::Config; -use openraft::Raft; -use crate::app::ExampleApp; +use crate::app::App; use crate::network::api; use crate::network::management; use crate::network::raft; -use crate::network::raft_network_impl::ExampleNetwork; -use crate::store::ExampleRequest; -use crate::store::ExampleResponse; -use crate::store::ExampleStore; +use crate::network::Network; +use crate::store::Request; +use crate::store::Response; +use crate::store::Store; pub mod app; pub mod client; pub mod network; pub mod store; -pub type ExampleNodeId = u64; +pub type NodeId = u64; openraft::declare_raft_types!( /// Declare the type configuration for example K/V store. - pub ExampleTypeConfig: D = ExampleRequest, R = ExampleResponse, NodeId = ExampleNodeId, Node = BasicNode, Entry = openraft::Entry + pub TypeConfig: D = Request, R = Response, NodeId = NodeId, Node = BasicNode, Entry = openraft::Entry ); -pub type ExampleRaft = Raft>; +pub type LogStore = Adaptor>; +pub type StateMachineStore = Adaptor>; +pub type Raft = openraft::Raft; pub mod typ { use openraft::BasicNode; - use crate::ExampleNodeId; - use crate::ExampleTypeConfig; + use crate::NodeId; + use crate::TypeConfig; - pub type RaftError = openraft::error::RaftError; - pub type RPCError = - openraft::error::RPCError>; + pub type RaftError = openraft::error::RaftError; + pub type RPCError = openraft::error::RPCError>; - pub type ClientWriteError = openraft::error::ClientWriteError; - pub type CheckIsLeaderError = openraft::error::CheckIsLeaderError; - pub type ForwardToLeader = openraft::error::ForwardToLeader; - pub type InitializeError = openraft::error::InitializeError; + pub type ClientWriteError = openraft::error::ClientWriteError; + pub type CheckIsLeaderError = openraft::error::CheckIsLeaderError; + pub type ForwardToLeader = openraft::error::ForwardToLeader; + pub type InitializeError = openraft::error::InitializeError; - pub type ClientWriteResponse = openraft::raft::ClientWriteResponse; + pub type ClientWriteResponse = openraft::raft::ClientWriteResponse; } -pub async fn start_example_raft_node(node_id: ExampleNodeId, http_addr: String) -> std::io::Result<()> { +pub async fn start_example_raft_node(node_id: NodeId, http_addr: String) -> std::io::Result<()> { // Create a configuration for the raft instance. let config = Config { heartbeat_interval: 500, @@ -65,18 +65,20 @@ pub async fn start_example_raft_node(node_id: ExampleNodeId, http_addr: String) let config = Arc::new(config.validate().unwrap()); // Create a instance of where the Raft data will be stored. - let store = Arc::new(ExampleStore::default()); + let store = Arc::new(Store::default()); + + let (log_store, state_machine) = Adaptor::new(store.clone()); // Create the network layer that will connect and communicate the raft instances and // will be used in conjunction with the store created above. - let network = ExampleNetwork {}; + let network = Network {}; // Create a local raft instance. 
- let raft = Raft::new(node_id, config.clone(), network, store.clone()).await.unwrap(); + let raft = openraft::Raft::new(node_id, config.clone(), network, log_store, state_machine).await.unwrap(); // Create an application that will store all the instances created above, this will // be later used on the actix-web services. - let app = Data::new(ExampleApp { + let app_data = Data::new(App { id: node_id, addr: http_addr.clone(), raft, @@ -86,11 +88,11 @@ pub async fn start_example_raft_node(node_id: ExampleNodeId, http_addr: String) // Start the actix-web server. let server = HttpServer::new(move || { - App::new() + actix_web::App::new() .wrap(Logger::default()) .wrap(Logger::new("%a %{User-Agent}i")) .wrap(middleware::Compress::default()) - .app_data(app.clone()) + .app_data(app_data.clone()) // raft internal RPC .service(raft::append) .service(raft::snapshot) diff --git a/examples/raft-kv-memstore/src/network/api.rs b/examples/raft-kv-memstore/src/network/api.rs index f90812fa4..90f7e6888 100644 --- a/examples/raft-kv-memstore/src/network/api.rs +++ b/examples/raft-kv-memstore/src/network/api.rs @@ -8,9 +8,9 @@ use openraft::error::RaftError; use openraft::BasicNode; use web::Json; -use crate::app::ExampleApp; -use crate::store::ExampleRequest; -use crate::ExampleNodeId; +use crate::app::App; +use crate::store::Request; +use crate::NodeId; /** * Application API @@ -22,13 +22,13 @@ use crate::ExampleNodeId; * - `POST - /read` attempt to find a value from a given key. */ #[post("/write")] -pub async fn write(app: Data, req: Json) -> actix_web::Result { +pub async fn write(app: Data, req: Json) -> actix_web::Result { let response = app.raft.client_write(req.0).await; Ok(Json(response)) } #[post("/read")] -pub async fn read(app: Data, req: Json) -> actix_web::Result { +pub async fn read(app: Data, req: Json) -> actix_web::Result { let state_machine = app.store.state_machine.read().await; let key = req.0; let value = state_machine.data.get(&key).cloned(); @@ -38,7 +38,7 @@ pub async fn read(app: Data, req: Json) -> actix_web::Result } #[post("/consistent_read")] -pub async fn consistent_read(app: Data, req: Json) -> actix_web::Result { +pub async fn consistent_read(app: Data, req: Json) -> actix_web::Result { let ret = app.raft.is_leader().await; match ret { @@ -47,7 +47,7 @@ pub async fn consistent_read(app: Data, req: Json) -> actix_ let key = req.0; let value = state_machine.data.get(&key).cloned(); - let res: Result>> = + let res: Result>> = Ok(value.unwrap_or_default()); Ok(Json(res)) } diff --git a/examples/raft-kv-memstore/src/network/management.rs b/examples/raft-kv-memstore/src/network/management.rs index 98c5376ba..912cdbf42 100644 --- a/examples/raft-kv-memstore/src/network/management.rs +++ b/examples/raft-kv-memstore/src/network/management.rs @@ -3,16 +3,15 @@ use std::collections::BTreeSet; use actix_web::get; use actix_web::post; -use actix_web::web; use actix_web::web::Data; +use actix_web::web::Json; use actix_web::Responder; use openraft::error::Infallible; use openraft::BasicNode; use openraft::RaftMetrics; -use web::Json; -use crate::app::ExampleApp; -use crate::ExampleNodeId; +use crate::app::App; +use crate::NodeId; // --- Cluster management @@ -22,10 +21,7 @@ use crate::ExampleNodeId; /// This should be done before adding a node as a member into the cluster /// (by calling `change-membership`) #[post("/add-learner")] -pub async fn add_learner( - app: Data, - req: Json<(ExampleNodeId, String)>, -) -> actix_web::Result { +pub async fn add_learner(app: Data, req: 
Json<(NodeId, String)>) -> actix_web::Result { let node_id = req.0 .0; let node = BasicNode { addr: req.0 .1.clone() }; let res = app.raft.add_learner(node_id, node, true).await; @@ -34,17 +30,14 @@ pub async fn add_learner( /// Changes specified learners to members, or remove members. #[post("/change-membership")] -pub async fn change_membership( - app: Data, - req: Json>, -) -> actix_web::Result { +pub async fn change_membership(app: Data, req: Json>) -> actix_web::Result { let res = app.raft.change_membership(req.0, false).await; Ok(Json(res)) } /// Initialize a single-node cluster. #[post("/init")] -pub async fn init(app: Data) -> actix_web::Result { +pub async fn init(app: Data) -> actix_web::Result { let mut nodes = BTreeMap::new(); nodes.insert(app.id, BasicNode { addr: app.addr.clone() }); let res = app.raft.initialize(nodes).await; @@ -53,9 +46,9 @@ pub async fn init(app: Data) -> actix_web::Result { /// Get the latest metrics of the cluster #[get("/metrics")] -pub async fn metrics(app: Data) -> actix_web::Result { +pub async fn metrics(app: Data) -> actix_web::Result { let metrics = app.raft.metrics().borrow().clone(); - let res: Result, Infallible> = Ok(metrics); + let res: Result, Infallible> = Ok(metrics); Ok(Json(res)) } diff --git a/examples/raft-kv-memstore/src/network/mod.rs b/examples/raft-kv-memstore/src/network/mod.rs index aa89c6e51..fa31098cf 100644 --- a/examples/raft-kv-memstore/src/network/mod.rs +++ b/examples/raft-kv-memstore/src/network/mod.rs @@ -1,4 +1,7 @@ pub mod api; pub mod management; pub mod raft; -pub mod raft_network_impl; +mod raft_network_impl; + +pub use raft_network_impl::Network; +pub use raft_network_impl::NetworkConnection; diff --git a/examples/raft-kv-memstore/src/network/raft.rs b/examples/raft-kv-memstore/src/network/raft.rs index dd3651dcc..aec0f3e94 100644 --- a/examples/raft-kv-memstore/src/network/raft.rs +++ b/examples/raft-kv-memstore/src/network/raft.rs @@ -1,37 +1,33 @@ use actix_web::post; -use actix_web::web; use actix_web::web::Data; +use actix_web::web::Json; use actix_web::Responder; use openraft::raft::AppendEntriesRequest; use openraft::raft::InstallSnapshotRequest; use openraft::raft::VoteRequest; -use web::Json; -use crate::app::ExampleApp; -use crate::ExampleNodeId; -use crate::ExampleTypeConfig; +use crate::app::App; +use crate::NodeId; +use crate::TypeConfig; // --- Raft communication #[post("/raft-vote")] -pub async fn vote(app: Data, req: Json>) -> actix_web::Result { +pub async fn vote(app: Data, req: Json>) -> actix_web::Result { let res = app.raft.vote(req.0).await; Ok(Json(res)) } #[post("/raft-append")] -pub async fn append( - app: Data, - req: Json>, -) -> actix_web::Result { +pub async fn append(app: Data, req: Json>) -> actix_web::Result { let res = app.raft.append_entries(req.0).await; Ok(Json(res)) } #[post("/raft-snapshot")] pub async fn snapshot( - app: Data, - req: Json>, + app: Data, + req: Json>, ) -> actix_web::Result { let res = app.raft.install_snapshot(req.0).await; Ok(Json(res)) diff --git a/examples/raft-kv-memstore/src/network/raft_network_impl.rs b/examples/raft-kv-memstore/src/network/raft_network_impl.rs index 5e84497b8..64ded3b21 100644 --- a/examples/raft-kv-memstore/src/network/raft_network_impl.rs +++ b/examples/raft-kv-memstore/src/network/raft_network_impl.rs @@ -1,8 +1,6 @@ use async_trait::async_trait; use openraft::error::InstallSnapshotError; use openraft::error::NetworkError; -use openraft::error::RPCError; -use openraft::error::RaftError; use openraft::error::RemoteError; use 
openraft::raft::AppendEntriesRequest; use openraft::raft::AppendEntriesResponse; @@ -16,19 +14,20 @@ use openraft::RaftNetworkFactory; use serde::de::DeserializeOwned; use serde::Serialize; -use crate::ExampleNodeId; -use crate::ExampleTypeConfig; +use crate::typ; +use crate::NodeId; +use crate::TypeConfig; -pub struct ExampleNetwork {} +pub struct Network {} -impl ExampleNetwork { +impl Network { pub async fn send_rpc( &self, - target: ExampleNodeId, + target: NodeId, target_node: &BasicNode, uri: &str, req: Req, - ) -> Result> + ) -> Result> where Req: Serialize, Err: std::error::Error + DeserializeOwned, @@ -37,68 +36,65 @@ impl ExampleNetwork { let addr = &target_node.addr; let url = format!("http://{}/{}", addr, uri); - tracing::debug!("send_rpc to url: {}", url); let client = reqwest::Client::new(); - tracing::debug!("client is created for: {}", url); - let resp = client.post(url).json(&req).send().await.map_err(|e| RPCError::Network(NetworkError::new(&e)))?; + let resp = client + .post(url) + .json(&req) + .send() + .await + .map_err(|e| openraft::error::RPCError::Network(NetworkError::new(&e)))?; tracing::debug!("client.post() is sent"); - let res: Result = resp.json().await.map_err(|e| RPCError::Network(NetworkError::new(&e)))?; + let res: Result = + resp.json().await.map_err(|e| openraft::error::RPCError::Network(NetworkError::new(&e)))?; - res.map_err(|e| RPCError::RemoteError(RemoteError::new(target, e))) + res.map_err(|e| openraft::error::RPCError::RemoteError(RemoteError::new(target, e))) } } // NOTE: This could be implemented also on `Arc`, but since it's empty, implemented // directly. #[async_trait] -impl RaftNetworkFactory for ExampleNetwork { - type Network = ExampleNetworkConnection; +impl RaftNetworkFactory for Network { + type Network = NetworkConnection; - async fn new_client(&mut self, target: ExampleNodeId, node: &BasicNode) -> Self::Network { - ExampleNetworkConnection { - owner: ExampleNetwork {}, + async fn new_client(&mut self, target: NodeId, node: &BasicNode) -> Self::Network { + NetworkConnection { + owner: Network {}, target, target_node: node.clone(), } } } -pub struct ExampleNetworkConnection { - owner: ExampleNetwork, - target: ExampleNodeId, +pub struct NetworkConnection { + owner: Network, + target: NodeId, target_node: BasicNode, } #[async_trait] -impl RaftNetwork for ExampleNetworkConnection { +impl RaftNetwork for NetworkConnection { async fn send_append_entries( &mut self, - req: AppendEntriesRequest, - ) -> Result, RPCError>> - { + req: AppendEntriesRequest, + ) -> Result, typ::RPCError> { self.owner.send_rpc(self.target, &self.target_node, "raft-append", req).await } async fn send_install_snapshot( &mut self, - req: InstallSnapshotRequest, - ) -> Result< - InstallSnapshotResponse, - RPCError>, - > { + req: InstallSnapshotRequest, + ) -> Result, typ::RPCError> { self.owner.send_rpc(self.target, &self.target_node, "raft-snapshot", req).await } - async fn send_vote( - &mut self, - req: VoteRequest, - ) -> Result, RPCError>> { + async fn send_vote(&mut self, req: VoteRequest) -> Result, typ::RPCError> { self.owner.send_rpc(self.target, &self.target_node, "raft-vote", req).await } } diff --git a/examples/raft-kv-memstore/src/store/mod.rs b/examples/raft-kv-memstore/src/store/mod.rs index e6a5070b3..76a3a6107 100644 --- a/examples/raft-kv-memstore/src/store/mod.rs +++ b/examples/raft-kv-memstore/src/store/mod.rs @@ -24,8 +24,8 @@ use serde::Deserialize; use serde::Serialize; use tokio::sync::RwLock; -use crate::ExampleNodeId; -use 
crate::ExampleTypeConfig; +use crate::NodeId; +use crate::TypeConfig; /** * Here you will set the types of request that will interact with the raft nodes. @@ -34,26 +34,26 @@ use crate::ExampleTypeConfig; * You will want to add any request that can write data in all nodes here. */ #[derive(Serialize, Deserialize, Debug, Clone)] -pub enum ExampleRequest { +pub enum Request { Set { key: String, value: String }, } /** * Here you will defined what type of answer you expect from reading the data of a node. * In this example it will return a optional value from a given key in - * the `ExampleRequest.Set`. + * the `Request.Set`. * * TODO: Should we explain how to create multiple `AppDataResponse`? * */ #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ExampleResponse { +pub struct Response { pub value: Option, } #[derive(Debug)] -pub struct ExampleSnapshot { - pub meta: SnapshotMeta, +pub struct StoredSnapshot { + pub meta: SnapshotMeta, /// The data of the state machine at the time of this snapshot. pub data: Vec, @@ -66,36 +66,36 @@ pub struct ExampleSnapshot { * value as String, but you could set any type of value that has the serialization impl. */ #[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct ExampleStateMachine { - pub last_applied_log: Option>, +pub struct StateMachine { + pub last_applied_log: Option>, - pub last_membership: StoredMembership, + pub last_membership: StoredMembership, /// Application data. pub data: BTreeMap, } #[derive(Debug, Default)] -pub struct ExampleStore { - last_purged_log_id: RwLock>>, +pub struct Store { + last_purged_log_id: RwLock>>, /// The Raft log. - log: RwLock>>, + log: RwLock>>, /// The Raft state machine. - pub state_machine: RwLock, + pub state_machine: RwLock, /// The current granted vote. 
- vote: RwLock>>, + vote: RwLock>>, snapshot_idx: Arc>, - current_snapshot: RwLock>, + current_snapshot: RwLock>, } #[async_trait] -impl RaftLogReader for Arc { - async fn get_log_state(&mut self) -> Result, StorageError> { +impl RaftLogReader for Arc { + async fn get_log_state(&mut self) -> Result, StorageError> { let log = self.log.read().await; let last = log.iter().rev().next().map(|(_, ent)| ent.log_id); @@ -115,7 +115,7 @@ impl RaftLogReader for Arc { async fn try_get_log_entries + Clone + Debug + Send + Sync>( &mut self, range: RB, - ) -> Result>, StorageError> { + ) -> Result>, StorageError> { let log = self.log.read().await; let response = log.range(range.clone()).map(|(_, val)| val.clone()).collect::>(); Ok(response) @@ -123,11 +123,9 @@ impl RaftLogReader for Arc { } #[async_trait] -impl RaftSnapshotBuilder>> for Arc { +impl RaftSnapshotBuilder>> for Arc { #[tracing::instrument(level = "trace", skip(self))] - async fn build_snapshot( - &mut self, - ) -> Result>>, StorageError> { + async fn build_snapshot(&mut self) -> Result>>, StorageError> { let data; let last_applied_log; let last_membership; @@ -159,7 +157,7 @@ impl RaftSnapshotBuilder>> for Arc>> for Arc for Arc { +impl RaftStorage for Arc { type SnapshotData = Cursor>; type LogReader = Self; type SnapshotBuilder = Self; #[tracing::instrument(level = "trace", skip(self))] - async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError> { + async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError> { let mut v = self.vote.write().await; *v = Some(*vote); Ok(()) } - async fn read_vote(&mut self) -> Result>, StorageError> { + async fn read_vote(&mut self) -> Result>, StorageError> { Ok(*self.vote.read().await) } #[tracing::instrument(level = "trace", skip(self, entries))] - async fn append_to_log(&mut self, entries: I) -> Result<(), StorageError> - where I: IntoIterator> + Send { + async fn append_to_log(&mut self, entries: I) -> Result<(), StorageError> + where I: IntoIterator> + Send { let mut log = self.log.write().await; for entry in entries { log.insert(entry.log_id.index, entry); @@ -204,10 +202,7 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "debug", skip(self))] - async fn delete_conflict_logs_since( - &mut self, - log_id: LogId, - ) -> Result<(), StorageError> { + async fn delete_conflict_logs_since(&mut self, log_id: LogId) -> Result<(), StorageError> { tracing::debug!("delete_log: [{:?}, +oo)", log_id); let mut log = self.log.write().await; @@ -220,7 +215,7 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "debug", skip(self))] - async fn purge_logs_upto(&mut self, log_id: LogId) -> Result<(), StorageError> { + async fn purge_logs_upto(&mut self, log_id: LogId) -> Result<(), StorageError> { tracing::debug!("delete_log: [{:?}, +oo)", log_id); { @@ -243,8 +238,7 @@ impl RaftStorage for Arc { async fn last_applied_state( &mut self, - ) -> Result<(Option>, StoredMembership), StorageError> - { + ) -> Result<(Option>, StoredMembership), StorageError> { let state_machine = self.state_machine.read().await; Ok((state_machine.last_applied_log, state_machine.last_membership.clone())) } @@ -252,8 +246,8 @@ impl RaftStorage for Arc { #[tracing::instrument(level = "trace", skip(self, entries))] async fn apply_to_state_machine( &mut self, - entries: &[Entry], - ) -> Result, StorageError> { + entries: &[Entry], + ) -> Result, StorageError> { let mut res = Vec::with_capacity(entries.len()); let mut sm = self.state_machine.write().await; @@ -264,18 +258,18 @@ impl RaftStorage for Arc { 
sm.last_applied_log = Some(entry.log_id); match entry.payload { - EntryPayload::Blank => res.push(ExampleResponse { value: None }), + EntryPayload::Blank => res.push(Response { value: None }), EntryPayload::Normal(ref req) => match req { - ExampleRequest::Set { key, value } => { + Request::Set { key, value } => { sm.data.insert(key.clone(), value.clone()); - res.push(ExampleResponse { + res.push(Response { value: Some(value.clone()), }) } }, EntryPayload::Membership(ref mem) => { sm.last_membership = StoredMembership::new(Some(entry.log_id), mem.clone()); - res.push(ExampleResponse { value: None }) + res.push(Response { value: None }) } }; } @@ -283,29 +277,29 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "trace", skip(self))] - async fn begin_receiving_snapshot(&mut self) -> Result, StorageError> { + async fn begin_receiving_snapshot(&mut self) -> Result, StorageError> { Ok(Box::new(Cursor::new(Vec::new()))) } #[tracing::instrument(level = "trace", skip(self, snapshot))] async fn install_snapshot( &mut self, - meta: &SnapshotMeta, + meta: &SnapshotMeta, snapshot: Box, - ) -> Result<(), StorageError> { + ) -> Result<(), StorageError> { tracing::info!( { snapshot_size = snapshot.get_ref().len() }, "decoding snapshot for installation" ); - let new_snapshot = ExampleSnapshot { + let new_snapshot = StoredSnapshot { meta: meta.clone(), data: snapshot.into_inner(), }; // Update the state machine. { - let updated_state_machine: ExampleStateMachine = serde_json::from_slice(&new_snapshot.data) + let updated_state_machine: StateMachine = serde_json::from_slice(&new_snapshot.data) .map_err(|e| StorageIOError::read_snapshot(new_snapshot.meta.signature(), &e))?; let mut state_machine = self.state_machine.write().await; *state_machine = updated_state_machine; @@ -320,7 +314,7 @@ impl RaftStorage for Arc { #[tracing::instrument(level = "trace", skip(self))] async fn get_current_snapshot( &mut self, - ) -> Result>, StorageError> { + ) -> Result>, StorageError> { match &*self.current_snapshot.read().await { Some(snapshot) => { let data = snapshot.data.clone(); diff --git a/examples/raft-kv-memstore/tests/cluster/test_cluster.rs b/examples/raft-kv-memstore/tests/cluster/test_cluster.rs index 507011915..1c8a8f14f 100644 --- a/examples/raft-kv-memstore/tests/cluster/test_cluster.rs +++ b/examples/raft-kv-memstore/tests/cluster/test_cluster.rs @@ -9,7 +9,7 @@ use maplit::btreeset; use openraft::BasicNode; use raft_kv_memstore::client::ExampleClient; use raft_kv_memstore::start_example_raft_node; -use raft_kv_memstore::store::ExampleRequest; +use raft_kv_memstore::store::Request; use tokio::runtime::Runtime; use tracing_subscriber::EnvFilter; @@ -170,7 +170,7 @@ async fn test_cluster() -> anyhow::Result<()> { println!("=== write `foo=bar`"); let _x = client - .write(&ExampleRequest::Set { + .write(&Request::Set { key: "foo".to_string(), value: "bar".to_string(), }) @@ -200,7 +200,7 @@ async fn test_cluster() -> anyhow::Result<()> { println!("=== read `foo` on node 2"); let _x = client2 - .write(&ExampleRequest::Set { + .write(&Request::Set { key: "foo".to_string(), value: "wow".to_string(), }) @@ -253,7 +253,7 @@ async fn test_cluster() -> anyhow::Result<()> { println!("=== write `foo=zoo` to node-3"); let _x = client3 - .write(&ExampleRequest::Set { + .write(&Request::Set { key: "foo".to_string(), value: "zoo".to_string(), }) diff --git a/examples/raft-kv-rocksdb/src/app.rs b/examples/raft-kv-rocksdb/src/app.rs index dcfb79096..87925531e 100644 --- a/examples/raft-kv-rocksdb/src/app.rs +++ 
b/examples/raft-kv-rocksdb/src/app.rs @@ -2,17 +2,17 @@ use std::sync::Arc; use openraft::Config; -use crate::ExampleNodeId; use crate::ExampleRaft; -use crate::ExampleStore; +use crate::NodeId; +use crate::Store; // Representation of an application state. This struct can be shared around to share // instances of raft, store and more. -pub struct ExampleApp { - pub id: ExampleNodeId, +pub struct App { + pub id: NodeId, pub api_addr: String, pub rcp_addr: String, pub raft: ExampleRaft, - pub store: Arc, + pub store: Arc, pub config: Arc, } diff --git a/examples/raft-kv-rocksdb/src/bin/main.rs b/examples/raft-kv-rocksdb/src/bin/main.rs index a8fe606f7..aad600bda 100644 --- a/examples/raft-kv-rocksdb/src/bin/main.rs +++ b/examples/raft-kv-rocksdb/src/bin/main.rs @@ -1,13 +1,7 @@ use clap::Parser; -use openraft::Raft; -use raft_kv_rocksdb::network::raft_network_impl::ExampleNetwork; use raft_kv_rocksdb::start_example_raft_node; -use raft_kv_rocksdb::store::ExampleStore; -use raft_kv_rocksdb::ExampleTypeConfig; use tracing_subscriber::EnvFilter; -pub type ExampleRaft = Raft; - #[derive(Parser, Clone, Debug)] #[clap(author, version, about, long_about = None)] pub struct Opt { diff --git a/examples/raft-kv-rocksdb/src/client.rs b/examples/raft-kv-rocksdb/src/client.rs index 88ff90dc9..485d36abd 100644 --- a/examples/raft-kv-rocksdb/src/client.rs +++ b/examples/raft-kv-rocksdb/src/client.rs @@ -18,10 +18,10 @@ use serde::de::DeserializeOwned; use serde::Deserialize; use serde::Serialize; -use crate::ExampleNode; -use crate::ExampleNodeId; -use crate::ExampleRequest; -use crate::ExampleTypeConfig; +use crate::Node; +use crate::NodeId; +use crate::Request; +use crate::TypeConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Empty {} @@ -30,14 +30,14 @@ pub struct ExampleClient { /// The leader node to send request to. /// /// All traffic should be sent to the leader in a cluster. - pub leader: Arc>, + pub leader: Arc>, pub inner: Client, } impl ExampleClient { /// Create a client with a leader node id and a node manager to get node address by node id. - pub fn new(leader_id: ExampleNodeId, leader_addr: String) -> Self { + pub fn new(leader_id: NodeId, leader_addr: String) -> Self { Self { leader: Arc::new(Mutex::new((leader_id, leader_addr))), inner: reqwest::Client::new(), @@ -54,10 +54,10 @@ impl ExampleClient { /// The result of applying the request will be returned. pub async fn write( &self, - req: &ExampleRequest, + req: &Request, ) -> Result< - ClientWriteResponse, - RPCError>>, + ClientWriteResponse, + RPCError>>, > { self.send_rpc_to_leader("api/write", Some(req)).await } @@ -65,10 +65,7 @@ impl ExampleClient { /// Read value by key, in an inconsistent mode. /// /// This method may return stale value because it does not force to read on a legal leader. - pub async fn read( - &self, - req: &String, - ) -> Result>> { + pub async fn read(&self, req: &String) -> Result>> { self.do_send_rpc_to_leader("api/read", Some(req)).await } @@ -78,10 +75,7 @@ impl ExampleClient { pub async fn consistent_read( &self, req: &String, - ) -> Result< - String, - RPCError>>, - > { + ) -> Result>>> { self.do_send_rpc_to_leader("api/consistent_read", Some(req)).await } @@ -93,12 +87,7 @@ impl ExampleClient { /// With a initialized cluster, new node can be added with [`write`]. /// Then setup replication with [`add_learner`]. /// Then make the new node a member with [`change_membership`]. 
- pub async fn init( - &self, - ) -> Result< - (), - RPCError>>, - > { + pub async fn init(&self) -> Result<(), RPCError>>> { self.do_send_rpc_to_leader("cluster/init", Some(&Empty {})).await } @@ -107,10 +96,10 @@ impl ExampleClient { /// The node to add has to exist, i.e., being added with `write(ExampleRequest::AddNode{})` pub async fn add_learner( &self, - req: (ExampleNodeId, String, String), + req: (NodeId, String, String), ) -> Result< - ClientWriteResponse, - RPCError>>, + ClientWriteResponse, + RPCError>>, > { self.send_rpc_to_leader("cluster/add-learner", Some(&req)).await } @@ -121,10 +110,10 @@ impl ExampleClient { /// or an error [`LearnerNotFound`] will be returned. pub async fn change_membership( &self, - req: &BTreeSet, + req: &BTreeSet, ) -> Result< - ClientWriteResponse, - RPCError>>, + ClientWriteResponse, + RPCError>>, > { self.send_rpc_to_leader("cluster/change-membership", Some(req)).await } @@ -134,10 +123,7 @@ impl ExampleClient { /// Metrics contains various information about the cluster, such as current leader, /// membership config, replication status etc. /// See [`RaftMetrics`]. - pub async fn metrics( - &self, - ) -> Result, RPCError>> - { + pub async fn metrics(&self) -> Result, RPCError>> { self.do_send_rpc_to_leader("cluster/metrics", None::<&()>).await } @@ -152,7 +138,7 @@ impl ExampleClient { &self, uri: &str, req: Option<&Req>, - ) -> Result> + ) -> Result> where Req: Serialize + 'static, Resp: Serialize + DeserializeOwned, @@ -197,21 +183,17 @@ impl ExampleClient { &self, uri: &str, req: Option<&Req>, - ) -> Result>> + ) -> Result>> where Req: Serialize + 'static, Resp: Serialize + DeserializeOwned, - Err: std::error::Error - + Serialize - + DeserializeOwned - + TryAsRef> - + Clone, + Err: std::error::Error + Serialize + DeserializeOwned + TryAsRef> + Clone, { // Retry at most 3 times to find a valid leader. 
let mut n_retry = 3; loop { - let res: Result>> = + let res: Result>> = self.do_send_rpc_to_leader(uri, req).await; let rpc_err = match res { @@ -220,7 +202,7 @@ impl ExampleClient { }; if let RPCError::RemoteError(remote_err) = &rpc_err { - let raft_err: &RaftError = &remote_err.source; + let raft_err: &RaftError = &remote_err.source; if let Some(ForwardToLeader { leader_id: Some(leader_id), diff --git a/examples/raft-kv-rocksdb/src/lib.rs b/examples/raft-kv-rocksdb/src/lib.rs index 05b3ce807..ab16d2b3b 100644 --- a/examples/raft-kv-rocksdb/src/lib.rs +++ b/examples/raft-kv-rocksdb/src/lib.rs @@ -7,31 +7,31 @@ use std::sync::Arc; use async_std::net::TcpListener; use async_std::task; +use openraft::storage::Adaptor; use openraft::Config; -use openraft::Raft; -use crate::app::ExampleApp; +use crate::app::App; use crate::network::api; use crate::network::management; -use crate::network::raft_network_impl::ExampleNetwork; -use crate::store::ExampleRequest; -use crate::store::ExampleResponse; -use crate::store::ExampleStore; +use crate::network::Network; +use crate::store::Request; +use crate::store::Response; +use crate::store::Store; pub mod app; pub mod client; pub mod network; pub mod store; -pub type ExampleNodeId = u64; +pub type NodeId = u64; #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq, Default)] -pub struct ExampleNode { +pub struct Node { pub rpc_addr: String, pub api_addr: String, } -impl Display for ExampleNode { +impl Display for Node { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, @@ -43,13 +43,17 @@ impl Display for ExampleNode { openraft::declare_raft_types!( /// Declare the type configuration for example K/V store. - pub ExampleTypeConfig: D = ExampleRequest, R = ExampleResponse, NodeId = ExampleNodeId, Node = ExampleNode, Entry = openraft::Entry + pub TypeConfig: D = Request, R = Response, NodeId = NodeId, Node = Node, Entry = openraft::Entry ); -pub type ExampleRaft = Raft>; -type Server = tide::Server>; +pub type LogStore = Adaptor>; +pub type StateMachineStore = Adaptor>; +pub type ExampleRaft = openraft::Raft; + +type Server = tide::Server>; + pub async fn start_example_raft_node

( - node_id: ExampleNodeId, + node_id: NodeId, dir: P, http_addr: String, rcp_addr: String, @@ -67,16 +71,18 @@ where let config = Arc::new(config.validate().unwrap()); // Create a instance of where the Raft data will be stored. - let store = ExampleStore::new(&dir).await; + let store = Store::new(&dir).await; + + let (log_store, state_machine) = Adaptor::new(store.clone()); // Create the network layer that will connect and communicate the raft instances and // will be used in conjunction with the store created above. - let network = ExampleNetwork {}; + let network = Network {}; // Create a local raft instance. - let raft = Raft::new(node_id, config.clone(), network, store.clone()).await.unwrap(); + let raft = openraft::Raft::new(node_id, config.clone(), network, log_store, state_machine).await.unwrap(); - let app = Arc::new(ExampleApp { + let app = Arc::new(App { id: node_id, api_addr: http_addr.clone(), rcp_addr: rcp_addr.clone(), @@ -85,7 +91,7 @@ where config, }); - let echo_service = Arc::new(crate::network::raft::Raft::new(app.clone())); + let echo_service = Arc::new(network::raft::Raft::new(app.clone())); let server = toy_rpc::Server::builder().register(echo_service).build(); diff --git a/examples/raft-kv-rocksdb/src/network.rs b/examples/raft-kv-rocksdb/src/network.rs index aa89c6e51..fa31098cf 100644 --- a/examples/raft-kv-rocksdb/src/network.rs +++ b/examples/raft-kv-rocksdb/src/network.rs @@ -1,4 +1,7 @@ pub mod api; pub mod management; pub mod raft; -pub mod raft_network_impl; +mod raft_network_impl; + +pub use raft_network_impl::Network; +pub use raft_network_impl::NetworkConnection; diff --git a/examples/raft-kv-rocksdb/src/network/api.rs b/examples/raft-kv-rocksdb/src/network/api.rs index 0c5e8572e..353df4f7b 100644 --- a/examples/raft-kv-rocksdb/src/network/api.rs +++ b/examples/raft-kv-rocksdb/src/network/api.rs @@ -7,9 +7,9 @@ use tide::Request; use tide::Response; use tide::StatusCode; -use crate::app::ExampleApp; -use crate::ExampleNode; -use crate::ExampleNodeId; +use crate::app::App; +use crate::Node; +use crate::NodeId; use crate::Server; pub fn rest(app: &mut Server) { @@ -27,13 +27,13 @@ pub fn rest(app: &mut Server) { * - `POST - /write` saves a value in a key and sync the nodes. * - `POST - /read` attempt to find a value from a given key. 
*/ -async fn write(mut req: Request>) -> tide::Result { +async fn write(mut req: Request>) -> tide::Result { let body = req.body_json().await?; let res = req.state().raft.client_write(body).await; Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } -async fn read(mut req: Request>) -> tide::Result { +async fn read(mut req: Request>) -> tide::Result { let key: String = req.body_json().await?; let state_machine = req.state().store.state_machine.read().await; let value = state_machine.get(&key)?; @@ -42,7 +42,7 @@ async fn read(mut req: Request>) -> tide::Result { Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } -async fn consistent_read(mut req: Request>) -> tide::Result { +async fn consistent_read(mut req: Request>) -> tide::Result { let ret = req.state().raft.is_leader().await; match ret { @@ -52,7 +52,7 @@ async fn consistent_read(mut req: Request>) -> tide::Result { let value = state_machine.get(&key)?; - let res: Result> = Ok(value.unwrap_or_default()); + let res: Result> = Ok(value.unwrap_or_default()); Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } e => Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&e)?).build()), diff --git a/examples/raft-kv-rocksdb/src/network/management.rs b/examples/raft-kv-rocksdb/src/network/management.rs index b38b5ccd6..798d8189a 100644 --- a/examples/raft-kv-rocksdb/src/network/management.rs +++ b/examples/raft-kv-rocksdb/src/network/management.rs @@ -9,9 +9,9 @@ use tide::Request; use tide::Response; use tide::StatusCode; -use crate::app::ExampleApp; -use crate::ExampleNode; -use crate::ExampleNodeId; +use crate::app::App; +use crate::Node; +use crate::NodeId; use crate::Server; // --- Cluster management @@ -29,24 +29,24 @@ pub fn rest(app: &mut Server) { /// A Learner receives log replication from the leader but does not vote. /// This should be done before adding a node as a member into the cluster /// (by calling `change-membership`) -async fn add_learner(mut req: Request>) -> tide::Result { - let (node_id, api_addr, rpc_addr): (ExampleNodeId, String, String) = req.body_json().await?; - let node = ExampleNode { rpc_addr, api_addr }; +async fn add_learner(mut req: Request>) -> tide::Result { + let (node_id, api_addr, rpc_addr): (NodeId, String, String) = req.body_json().await?; + let node = Node { rpc_addr, api_addr }; let res = req.state().raft.add_learner(node_id, node, true).await; Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } /// Changes specified learners to members, or remove members. -async fn change_membership(mut req: Request>) -> tide::Result { - let body: BTreeSet = req.body_json().await?; +async fn change_membership(mut req: Request>) -> tide::Result { + let body: BTreeSet = req.body_json().await?; let res = req.state().raft.change_membership(body, false).await; Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } /// Initialize a single-node cluster. 
-async fn init(req: Request>) -> tide::Result { +async fn init(req: Request>) -> tide::Result { let mut nodes = BTreeMap::new(); - let node = ExampleNode { + let node = Node { api_addr: req.state().api_addr.clone(), rpc_addr: req.state().rcp_addr.clone(), }; @@ -57,9 +57,9 @@ async fn init(req: Request>) -> tide::Result { } /// Get the latest metrics of the cluster -async fn metrics(req: Request>) -> tide::Result { +async fn metrics(req: Request>) -> tide::Result { let metrics = req.state().raft.metrics().borrow().clone(); - let res: Result, Infallible> = Ok(metrics); + let res: Result, Infallible> = Ok(metrics); Ok(Response::builder(StatusCode::Ok).body(Body::from_json(&res)?).build()) } diff --git a/examples/raft-kv-rocksdb/src/network/raft.rs b/examples/raft-kv-rocksdb/src/network/raft.rs index 0d637cd49..2fd4aa5df 100644 --- a/examples/raft-kv-rocksdb/src/network/raft.rs +++ b/examples/raft-kv-rocksdb/src/network/raft.rs @@ -8,36 +8,38 @@ use openraft::raft::VoteRequest; use openraft::raft::VoteResponse; use toy_rpc::macros::export_impl; -use crate::app::ExampleApp; -use crate::ExampleTypeConfig; - -// --- Raft communication +use crate::app::App; +use crate::TypeConfig; +/// Raft protocol service. pub struct Raft { - app: Arc, + app: Arc, } #[export_impl] impl Raft { - pub fn new(app: Arc) -> Self { + pub fn new(app: Arc) -> Self { Self { app } } + #[export_method] pub async fn vote(&self, vote: VoteRequest) -> Result, toy_rpc::Error> { self.app.raft.vote(vote).await.map_err(|e| toy_rpc::Error::Internal(Box::new(e))) } + #[export_method] pub async fn append( &self, - req: AppendEntriesRequest, + req: AppendEntriesRequest, ) -> Result, toy_rpc::Error> { tracing::debug!("handle append"); self.app.raft.append_entries(req).await.map_err(|e| toy_rpc::Error::Internal(Box::new(e))) } + #[export_method] pub async fn snapshot( &self, - req: InstallSnapshotRequest, + req: InstallSnapshotRequest, ) -> Result, toy_rpc::Error> { self.app.raft.install_snapshot(req).await.map_err(|e| toy_rpc::Error::Internal(Box::new(e))) } diff --git a/examples/raft-kv-rocksdb/src/network/raft_network_impl.rs b/examples/raft-kv-rocksdb/src/network/raft_network_impl.rs index 85419417e..f87f256e9 100644 --- a/examples/raft-kv-rocksdb/src/network/raft_network_impl.rs +++ b/examples/raft-kv-rocksdb/src/network/raft_network_impl.rs @@ -21,36 +21,38 @@ use toy_rpc::pubsub::AckModeNone; use toy_rpc::Client; use super::raft::RaftClientStub; -use crate::ExampleNode; -use crate::ExampleNodeId; -use crate::ExampleTypeConfig; +use crate::Node; +use crate::NodeId; +use crate::TypeConfig; -pub struct ExampleNetwork {} +pub struct Network {} // NOTE: This could be implemented also on `Arc`, but since it's empty, implemented // directly. 
#[async_trait] -impl RaftNetworkFactory for ExampleNetwork { - type Network = ExampleNetworkConnection; +impl RaftNetworkFactory for Network { + type Network = NetworkConnection; #[tracing::instrument(level = "debug", skip_all)] - async fn new_client(&mut self, target: ExampleNodeId, node: &ExampleNode) -> Self::Network { + async fn new_client(&mut self, target: NodeId, node: &Node) -> Self::Network { let addr = format!("ws://{}", node.rpc_addr); + let client = Client::dial_websocket(&addr).await.ok(); tracing::debug!("new_client: is_none: {}", client.is_none()); - ExampleNetworkConnection { addr, client, target } + + NetworkConnection { addr, client, target } } } -pub struct ExampleNetworkConnection { +pub struct NetworkConnection { addr: String, client: Option>, - target: ExampleNodeId, + target: NodeId, } -impl ExampleNetworkConnection { +impl NetworkConnection { async fn c( &mut self, - ) -> Result<&Client, RPCError> { + ) -> Result<&Client, RPCError> { if self.client.is_none() { self.client = Client::dial_websocket(&self.addr).await.ok(); } @@ -69,10 +71,7 @@ impl Display for ErrWrap { impl std::error::Error for ErrWrap {} -fn to_error( - e: toy_rpc::Error, - target: ExampleNodeId, -) -> RPCError { +fn to_error(e: toy_rpc::Error, target: NodeId) -> RPCError { match e { toy_rpc::Error::IoError(e) => RPCError::Network(NetworkError::new(&e)), toy_rpc::Error::ParseError(e) => RPCError::Network(NetworkError::new(&ErrWrap(e))), @@ -92,13 +91,12 @@ fn to_error( } #[async_trait] -impl RaftNetwork for ExampleNetworkConnection { +impl RaftNetwork for NetworkConnection { #[tracing::instrument(level = "debug", skip_all, err(Debug))] async fn send_append_entries( &mut self, - req: AppendEntriesRequest, - ) -> Result, RPCError>> - { + req: AppendEntriesRequest, + ) -> Result, RPCError>> { tracing::debug!(req = debug(&req), "send_append_entries"); let c = self.c().await?; @@ -113,11 +111,8 @@ impl RaftNetwork for ExampleNetworkConnection { #[tracing::instrument(level = "debug", skip_all, err(Debug))] async fn send_install_snapshot( &mut self, - req: InstallSnapshotRequest, - ) -> Result< - InstallSnapshotResponse, - RPCError>, - > { + req: InstallSnapshotRequest, + ) -> Result, RPCError>> { tracing::debug!(req = debug(&req), "send_install_snapshot"); self.c().await?.raft().snapshot(req).await.map_err(|e| to_error(e, self.target)) } @@ -125,8 +120,8 @@ impl RaftNetwork for ExampleNetworkConnection { #[tracing::instrument(level = "debug", skip_all, err(Debug))] async fn send_vote( &mut self, - req: VoteRequest, - ) -> Result, RPCError>> { + req: VoteRequest, + ) -> Result, RPCError>> { tracing::debug!(req = debug(&req), "send_vote"); self.c().await?.raft().vote(req).await.map_err(|e| to_error(e, self.target)) } diff --git a/examples/raft-kv-rocksdb/src/store.rs b/examples/raft-kv-rocksdb/src/store.rs index 000f7dacc..7bc216888 100644 --- a/examples/raft-kv-rocksdb/src/store.rs +++ b/examples/raft-kv-rocksdb/src/store.rs @@ -35,9 +35,9 @@ use rocksdb::DB; use serde::Deserialize; use serde::Serialize; -use crate::ExampleNode; -use crate::ExampleNodeId; -use crate::ExampleTypeConfig; +use crate::Node; +use crate::NodeId; +use crate::TypeConfig; /** * Here you will set the types of request that will interact with the raft nodes. @@ -46,7 +46,7 @@ use crate::ExampleTypeConfig; * You will want to add any request that can write data in all nodes here. 
*/ #[derive(Serialize, Deserialize, Debug, Clone)] -pub enum ExampleRequest { +pub enum Request { Set { key: String, value: String }, } @@ -59,13 +59,13 @@ pub enum ExampleRequest { * */ #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ExampleResponse { +pub struct Response { pub value: Option, } #[derive(Serialize, Deserialize, Debug)] -pub struct ExampleSnapshot { - pub meta: SnapshotMeta, +pub struct StoredSnapshot { + pub meta: SnapshotMeta, /// The data of the state machine at the time of this snapshot. pub data: Vec, @@ -79,16 +79,16 @@ pub struct ExampleSnapshot { */ #[derive(Serialize, Deserialize, Debug, Default, Clone)] pub struct SerializableExampleStateMachine { - pub last_applied_log: Option>, + pub last_applied_log: Option>, - pub last_membership: StoredMembership, + pub last_membership: StoredMembership, /// Application data. pub data: BTreeMap, } -impl From<&ExampleStateMachine> for SerializableExampleStateMachine { - fn from(state: &ExampleStateMachine) -> Self { +impl From<&StateMachine> for SerializableExampleStateMachine { + fn from(state: &StateMachine) -> Self { let mut data = BTreeMap::new(); for res in state.db.iterator_cf( state.db.cf_handle("data").expect("cf_handle"), @@ -111,20 +111,20 @@ impl From<&ExampleStateMachine> for SerializableExampleStateMachine { } #[derive(Debug, Clone)] -pub struct ExampleStateMachine { +pub struct StateMachine { /// Application data. pub db: Arc, } -fn sm_r_err(e: E) -> StorageError { +fn sm_r_err(e: E) -> StorageError { StorageIOError::read_state_machine(&e).into() } -fn sm_w_err(e: E) -> StorageError { +fn sm_w_err(e: E) -> StorageError { StorageIOError::write(&e).into() } -impl ExampleStateMachine { - fn get_last_membership(&self) -> StorageResult> { +impl StateMachine { + fn get_last_membership(&self) -> StorageResult> { self.db .get_cf( self.db.cf_handle("state_machine").expect("cf_handle"), @@ -137,7 +137,7 @@ impl ExampleStateMachine { .unwrap_or_else(|| Ok(StoredMembership::default())) }) } - fn set_last_membership(&self, membership: StoredMembership) -> StorageResult<()> { + fn set_last_membership(&self, membership: StoredMembership) -> StorageResult<()> { self.db .put_cf( self.db.cf_handle("state_machine").expect("cf_handle"), @@ -146,7 +146,7 @@ impl ExampleStateMachine { ) .map_err(sm_w_err) } - fn get_last_applied_log(&self) -> StorageResult>> { + fn get_last_applied_log(&self) -> StorageResult>> { self.db .get_cf( self.db.cf_handle("state_machine").expect("cf_handle"), @@ -155,7 +155,7 @@ impl ExampleStateMachine { .map_err(sm_r_err) .and_then(|value| value.map(|v| serde_json::from_slice(&v).map_err(sm_r_err)).transpose()) } - fn set_last_applied_log(&self, log_id: LogId) -> StorageResult<()> { + fn set_last_applied_log(&self, log_id: LogId) -> StorageResult<()> { self.db .put_cf( self.db.cf_handle("state_machine").expect("cf_handle"), @@ -177,7 +177,7 @@ impl ExampleStateMachine { Ok(r) } - fn new(db: Arc) -> ExampleStateMachine { + fn new(db: Arc) -> StateMachine { Self { db } } fn insert(&self, key: String, value: String) -> StorageResult<()> { @@ -195,13 +195,13 @@ impl ExampleStateMachine { } #[derive(Debug)] -pub struct ExampleStore { +pub struct Store { db: Arc, /// The Raft state machine. - pub state_machine: RwLock, + pub state_machine: RwLock, } -type StorageResult = Result>; +type StorageResult = Result>; /// converts an id to a byte vector for storing in the database. 
/// Note that we're using big endian encoding to ensure correct sorting of keys @@ -215,7 +215,7 @@ fn bin_to_id(buf: &[u8]) -> u64 { (&buf[0..8]).read_u64::().unwrap() } -impl ExampleStore { +impl Store { fn store(&self) -> &ColumnFamily { self.db.cf_handle("store").unwrap() } @@ -224,11 +224,7 @@ impl ExampleStore { self.db.cf_handle("logs").unwrap() } - fn flush( - &self, - subject: ErrorSubject, - verb: ErrorVerb, - ) -> Result<(), StorageIOError> { + fn flush(&self, subject: ErrorSubject, verb: ErrorVerb) -> Result<(), StorageIOError> { self.db.flush_wal(true).map_err(|e| StorageIOError::new(subject, verb, AnyError::new(&e)))?; Ok(()) } @@ -277,7 +273,7 @@ impl ExampleStore { Ok(()) } - fn set_vote_(&self, vote: &Vote) -> StorageResult<()> { + fn set_vote_(&self, vote: &Vote) -> StorageResult<()> { self.db .put_cf(self.store(), b"vote", serde_json::to_vec(vote).unwrap()) .map_err(|e| StorageError::IO { @@ -288,7 +284,7 @@ impl ExampleStore { Ok(()) } - fn get_vote_(&self) -> StorageResult>> { + fn get_vote_(&self) -> StorageResult>> { Ok(self .db .get_cf(self.store(), b"vote") @@ -298,7 +294,7 @@ impl ExampleStore { .and_then(|v| serde_json::from_slice(&v).ok())) } - fn get_current_snapshot_(&self) -> StorageResult> { + fn get_current_snapshot_(&self) -> StorageResult> { Ok(self .db .get_cf(self.store(), b"snapshot") @@ -308,7 +304,7 @@ impl ExampleStore { .and_then(|v| serde_json::from_slice(&v).ok())) } - fn set_current_snapshot_(&self, snap: ExampleSnapshot) -> StorageResult<()> { + fn set_current_snapshot_(&self, snap: StoredSnapshot) -> StorageResult<()> { self.db .put_cf(self.store(), b"snapshot", serde_json::to_vec(&snap).unwrap().as_slice()) .map_err(|e| StorageError::IO { @@ -320,11 +316,11 @@ impl ExampleStore { } #[async_trait] -impl RaftLogReader for Arc { - async fn get_log_state(&mut self) -> StorageResult> { +impl RaftLogReader for Arc { + async fn get_log_state(&mut self) -> StorageResult> { let last = self.db.iterator_cf(self.logs(), rocksdb::IteratorMode::End).next().and_then(|res| { let (_, ent) = res.unwrap(); - Some(serde_json::from_slice::>(&ent).ok()?.log_id) + Some(serde_json::from_slice::>(&ent).ok()?.log_id) }); let last_purged_log_id = self.get_last_purged_()?; @@ -342,7 +338,7 @@ impl RaftLogReader for Arc { async fn try_get_log_entries + Clone + Debug + Send + Sync>( &mut self, range: RB, - ) -> StorageResult>> { + ) -> StorageResult>> { let start = match range.start_bound() { std::ops::Bound::Included(x) => id_to_bin(*x), std::ops::Bound::Excluded(x) => id_to_bin(*x + 1), @@ -367,11 +363,9 @@ impl RaftLogReader for Arc { } #[async_trait] -impl RaftSnapshotBuilder>> for Arc { +impl RaftSnapshotBuilder>> for Arc { #[tracing::instrument(level = "trace", skip(self))] - async fn build_snapshot( - &mut self, - ) -> Result>>, StorageError> { + async fn build_snapshot(&mut self) -> Result>>, StorageError> { let data; let last_applied_log; let last_membership; @@ -401,7 +395,7 @@ impl RaftSnapshotBuilder>> for Arc>> for Arc for Arc { +impl RaftStorage for Arc { type SnapshotData = Cursor>; type LogReader = Self; type SnapshotBuilder = Self; #[tracing::instrument(level = "trace", skip(self))] - async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError> { + async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError> { self.set_vote_(vote) } - async fn read_vote(&mut self) -> Result>, StorageError> { + async fn read_vote(&mut self) -> Result>, StorageError> { self.get_vote_() } #[tracing::instrument(level = "trace", skip(self, entries))] async 
fn append_to_log(&mut self, entries: I) -> StorageResult<()> - where I: IntoIterator> + Send { + where I: IntoIterator> + Send { for entry in entries { let id = id_to_bin(entry.log_id.index); assert_eq!(bin_to_id(&id), entry.log_id.index); @@ -449,7 +443,7 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "debug", skip(self))] - async fn delete_conflict_logs_since(&mut self, log_id: LogId) -> StorageResult<()> { + async fn delete_conflict_logs_since(&mut self, log_id: LogId) -> StorageResult<()> { tracing::debug!("delete_log: [{:?}, +oo)", log_id); let from = id_to_bin(log_id.index); @@ -458,7 +452,7 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "debug", skip(self))] - async fn purge_logs_upto(&mut self, log_id: LogId) -> Result<(), StorageError> { + async fn purge_logs_upto(&mut self, log_id: LogId) -> Result<(), StorageError> { tracing::debug!("delete_log: [0, {:?}]", log_id); self.set_last_purged_(log_id)?; @@ -469,13 +463,7 @@ impl RaftStorage for Arc { async fn last_applied_state( &mut self, - ) -> Result< - ( - Option>, - StoredMembership, - ), - StorageError, - > { + ) -> Result<(Option>, StoredMembership), StorageError> { let state_machine = self.state_machine.read().await; Ok(( state_machine.get_last_applied_log()?, @@ -486,8 +474,8 @@ impl RaftStorage for Arc { #[tracing::instrument(level = "trace", skip(self, entries))] async fn apply_to_state_machine( &mut self, - entries: &[Entry], - ) -> Result, StorageError> { + entries: &[Entry], + ) -> Result, StorageError> { let mut res = Vec::with_capacity(entries.len()); let sm = self.state_machine.write().await; @@ -498,11 +486,11 @@ impl RaftStorage for Arc { sm.set_last_applied_log(entry.log_id)?; match entry.payload { - EntryPayload::Blank => res.push(ExampleResponse { value: None }), + EntryPayload::Blank => res.push(Response { value: None }), EntryPayload::Normal(ref req) => match req { - ExampleRequest::Set { key, value } => { + Request::Set { key, value } => { sm.insert(key.clone(), value.clone())?; - res.push(ExampleResponse { + res.push(Response { value: Some(value.clone()), }) } @@ -510,7 +498,7 @@ impl RaftStorage for Arc { EntryPayload::Membership(ref mem) => { sm.set_last_membership(StoredMembership::new(Some(entry.log_id), mem.clone()))?; - res.push(ExampleResponse { value: None }) + res.push(Response { value: None }) } }; } @@ -520,22 +508,22 @@ impl RaftStorage for Arc { } #[tracing::instrument(level = "trace", skip(self))] - async fn begin_receiving_snapshot(&mut self) -> Result, StorageError> { + async fn begin_receiving_snapshot(&mut self) -> Result, StorageError> { Ok(Box::new(Cursor::new(Vec::new()))) } #[tracing::instrument(level = "trace", skip(self, snapshot))] async fn install_snapshot( &mut self, - meta: &SnapshotMeta, + meta: &SnapshotMeta, snapshot: Box, - ) -> Result<(), StorageError> { + ) -> Result<(), StorageError> { tracing::info!( { snapshot_size = snapshot.get_ref().len() }, "decoding snapshot for installation" ); - let new_snapshot = ExampleSnapshot { + let new_snapshot = StoredSnapshot { meta: meta.clone(), data: snapshot.into_inner(), }; @@ -545,7 +533,7 @@ impl RaftStorage for Arc { let updated_state_machine: SerializableExampleStateMachine = serde_json::from_slice(&new_snapshot.data) .map_err(|e| StorageIOError::read_snapshot(new_snapshot.meta.signature(), &e))?; let mut state_machine = self.state_machine.write().await; - *state_machine = ExampleStateMachine::from_serializable(updated_state_machine, self.db.clone())?; + *state_machine = 
StateMachine::from_serializable(updated_state_machine, self.db.clone())?; } self.set_current_snapshot_(new_snapshot)?; @@ -555,8 +543,8 @@ impl RaftStorage for Arc { #[tracing::instrument(level = "trace", skip(self))] async fn get_current_snapshot( &mut self, - ) -> Result>, StorageError> { - match ExampleStore::get_current_snapshot_(self)? { + ) -> Result>, StorageError> { + match Store::get_current_snapshot_(self)? { Some(snapshot) => { let data = snapshot.data.clone(); Ok(Some(Snapshot { @@ -576,8 +564,8 @@ impl RaftStorage for Arc { self.clone() } } -impl ExampleStore { - pub(crate) async fn new>(db_path: P) -> Arc { +impl Store { + pub(crate) async fn new>(db_path: P) -> Arc { let mut db_opts = Options::default(); db_opts.create_missing_column_families(true); db_opts.create_if_missing(true); @@ -590,7 +578,7 @@ impl ExampleStore { let db = DB::open_cf_descriptors(&db_opts, db_path, vec![store, state_machine, data, logs]).unwrap(); let db = Arc::new(db); - let state_machine = RwLock::new(ExampleStateMachine::new(db.clone())); - Arc::new(ExampleStore { db, state_machine }) + let state_machine = RwLock::new(StateMachine::new(db.clone())); + Arc::new(Store { db, state_machine }) } } diff --git a/examples/raft-kv-rocksdb/tests/cluster/test_cluster.rs b/examples/raft-kv-rocksdb/tests/cluster/test_cluster.rs index a7468bae0..406d18229 100644 --- a/examples/raft-kv-rocksdb/tests/cluster/test_cluster.rs +++ b/examples/raft-kv-rocksdb/tests/cluster/test_cluster.rs @@ -9,8 +9,8 @@ use maplit::btreemap; use maplit::btreeset; use raft_kv_rocksdb::client::ExampleClient; use raft_kv_rocksdb::start_example_raft_node; -use raft_kv_rocksdb::store::ExampleRequest; -use raft_kv_rocksdb::ExampleNode; +use raft_kv_rocksdb::store::Request; +use raft_kv_rocksdb::Node; use tracing_subscriber::EnvFilter; pub fn log_panic(panic: &PanicInfo) { @@ -126,9 +126,9 @@ async fn test_cluster() -> Result<(), Box> { x.membership_config.nodes().map(|(nid, node)| (*nid, node.clone())).collect::>(); assert_eq!( btreemap! 
{ - 1 => ExampleNode{rpc_addr: get_rpc_addr(1), api_addr: get_addr(1)}, - 2 => ExampleNode{rpc_addr: get_rpc_addr(2), api_addr: get_addr(2)}, - 3 => ExampleNode{rpc_addr: get_rpc_addr(3), api_addr: get_addr(3)}, + 1 => Node{rpc_addr: get_rpc_addr(1), api_addr: get_addr(1)}, + 2 => Node{rpc_addr: get_rpc_addr(2), api_addr: get_addr(2)}, + 3 => Node{rpc_addr: get_rpc_addr(3), api_addr: get_addr(3)}, }, nodes_in_cluster ); @@ -164,7 +164,7 @@ async fn test_cluster() -> Result<(), Box> { println!("=== write `foo=bar`"); let _x = leader - .write(&ExampleRequest::Set { + .write(&Request::Set { key: "foo".to_string(), value: "bar".to_string(), }) @@ -194,7 +194,7 @@ async fn test_cluster() -> Result<(), Box> { println!("=== read `foo` on node 2"); let _x = client2 - .write(&ExampleRequest::Set { + .write(&Request::Set { key: "foo".to_string(), value: "wow".to_string(), }) @@ -227,7 +227,7 @@ async fn test_cluster() -> Result<(), Box> { match x { Err(e) => { let s = e.to_string(); - let expect_err:String = "error occur on remote peer 2: has to forward request to: Some(1), Some(ExampleNode { rpc_addr: \"127.0.0.1:22001\", api_addr: \"127.0.0.1:21001\" })".to_string(); + let expect_err:String = "error occur on remote peer 2: has to forward request to: Some(1), Some(Node { rpc_addr: \"127.0.0.1:22001\", api_addr: \"127.0.0.1:21001\" })".to_string(); assert_eq!(s, expect_err); } diff --git a/memstore/src/test.rs b/memstore/src/test.rs index e93f763e9..60bf9ffbd 100644 --- a/memstore/src/test.rs +++ b/memstore/src/test.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; +use openraft::storage::Adaptor; use openraft::testing::StoreBuilder; use openraft::testing::Suite; use openraft::StorageError; @@ -11,10 +12,13 @@ use crate::MemStore; struct MemBuilder {} #[async_trait] -impl StoreBuilder> for MemBuilder { - async fn build(&self) -> Result<((), Arc), StorageError> { +impl StoreBuilder>, Adaptor>> for MemBuilder { + async fn build( + &self, + ) -> Result<((), Adaptor>, Adaptor>), StorageError> { let store = MemStore::new_async().await; - Ok(((), store)) + let (log_store, sm) = Adaptor::new(store); + Ok(((), log_store, sm)) } } diff --git a/openraft/src/core/raft_core.rs b/openraft/src/core/raft_core.rs index faabaa253..df7f0aad0 100644 --- a/openraft/src/core/raft_core.rs +++ b/openraft/src/core/raft_core.rs @@ -4,6 +4,7 @@ use std::pin::Pin; use std::sync::atomic::Ordering; use std::sync::Arc; +use anyerror::AnyError; use futures::future::abortable; use futures::future::select; use futures::future::Either; @@ -81,26 +82,43 @@ use crate::replication::ReplicationHandle; use crate::replication::ReplicationResult; use crate::replication::ReplicationSessionId; use crate::runtime::RaftRuntime; +use crate::storage::LogFlushed; +use crate::storage::RaftLogReaderExt; +use crate::storage::RaftLogStorage; use crate::storage::RaftSnapshotBuilder; +use crate::storage::RaftStateMachine; use crate::versioned::Updatable; use crate::versioned::Versioned; use crate::ChangeMembers; use crate::LogId; use crate::Membership; use crate::MessageSummary; +use crate::Node; +use crate::NodeId; use crate::RPCTypes; use crate::RaftNetwork; use crate::RaftNetworkFactory; -use crate::RaftStorage; use crate::RaftTypeConfig; use crate::SnapshotId; use crate::SnapshotSegmentId; use crate::StorageError; -use crate::StorageHelper; use crate::StorageIOError; use crate::Update; use crate::Vote; +/// A temp struct to hold the data for a node that is being applied. 
+#[derive(Debug)] +pub(crate) struct ApplyingEntry { + log_id: LogId, + membership: Option>, +} + +impl ApplyingEntry { + fn new(log_id: LogId, membership: Option>) -> Self { + Self { log_id, membership } + } +} + /// Data for a Leader. /// /// It is created when RaftCore enters leader state, and will be dropped when it quits leader state. @@ -136,7 +154,13 @@ where SD: AsyncRead + AsyncSeek + Send + Unpin + 'static } /// The core type implementing the Raft protocol. -pub struct RaftCore, S: RaftStorage> { +pub struct RaftCore +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ /// This node's ID. pub(crate) id: C::NodeId, @@ -148,28 +172,37 @@ pub struct RaftCore, S: RaftStorage< /// The `RaftNetworkFactory` implementation. pub(crate) network: N, - /// The `RaftStorage` implementation. - pub(crate) storage: S, + /// The [`RaftLogStorage`] implementation. + pub(crate) log_store: LS, + + /// The [`RaftStateMachine`] implementation. + pub(crate) state_machine: SM, pub(crate) engine: Engine, - pub(crate) leader_data: Option>, + pub(crate) leader_data: Option>, /// The node's current snapshot state. - pub(crate) snapshot_state: snapshot_state::State, + pub(crate) snapshot_state: snapshot_state::State, /// Received snapshot that are ready to install. - pub(crate) received_snapshot: BTreeMap>, + pub(crate) received_snapshot: BTreeMap>, - pub(crate) tx_api: mpsc::UnboundedSender>, - pub(crate) rx_api: mpsc::UnboundedReceiver>, + pub(crate) tx_api: mpsc::UnboundedSender>, + pub(crate) rx_api: mpsc::UnboundedReceiver>, pub(crate) tx_metrics: watch::Sender>, pub(crate) span: Span, } -impl, S: RaftStorage> RaftCore { +impl RaftCore +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ /// The main loop of the Raft protocol. pub(crate) async fn main(mut self, rx_shutdown: oneshot::Receiver<()>) -> Result<(), Fatal> { let span = tracing::span!(parent: &self.span, Level::DEBUG, "main"); @@ -203,7 +236,6 @@ impl, S: RaftStorage> RaftCore, S: RaftStorage> RaftCore, S: RaftStorage> RaftCore, S: RaftStorage> RaftCore Option { + pub(crate) fn current_leader(&self) -> Option { tracing::debug!( self_id = display(self.id), vote = display(self.engine.state.vote_ref().summary()), @@ -775,6 +807,27 @@ impl, S: RaftStorage> RaftCore( + &mut self, + entries: I, + last_log_id: LogId, + ) -> Result<(), StorageError> + where + I: IntoIterator + Send, + { + tracing::debug!("append_to_log"); + + let (tx, rx) = oneshot::channel(); + let callback = LogFlushed::new(Some(last_log_id), tx); + self.log_store.append(entries, callback).await?; + rx.await + .map_err(|e| StorageIOError::write_logs(AnyError::error(e)))? 
+ .map_err(|e| StorageIOError::write_logs(AnyError::error(e)))?; + Ok(()) + } + #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn apply_to_state_machine( &mut self, @@ -796,31 +849,38 @@ impl, S: RaftStorage> RaftCore(entries.as_slice())), "about to apply" ); + // Fake complain: avoid using `collect()` when not needed + #[allow(clippy::needless_collect)] + let applying_entries = entries + .iter() + .map(|e| ApplyingEntry::new(*e.get_log_id(), e.get_membership().cloned())) + .collect::>(); + // TODO: prepare response before apply_to_state_machine, // so that an Entry does not need to be Clone, // and no references will be used by apply_to_state_machine - let apply_results = self.storage.apply_to_state_machine(&entries).await?; - let last_applied = entries[entries.len() - 1].get_log_id(); + let last_applied = *entries[entries.len() - 1].get_log_id(); + let apply_results = self.state_machine.apply(entries).await?; + tracing::debug!(last_applied = display(last_applied), "update last_applied"); if let Some(l) = &mut self.leader_data { let mut results = apply_results.into_iter(); + let mut applying_entries = applying_entries.into_iter(); for log_index in since..end { - let tx = l.client_resp_channels.remove(&log_index); - - let i = log_index - since; - let entry = &entries[i as usize]; + let ent = applying_entries.next().unwrap(); let apply_res = results.next().unwrap(); + let tx = l.client_resp_channels.remove(&log_index); - Self::send_response(entry, apply_res, tx); + Self::send_response(ent, apply_res, tx); } } @@ -830,18 +890,18 @@ impl, S: RaftStorage> RaftCore>) { - tracing::debug!(entry = display(entry), "send_response"); + pub(super) fn send_response(entry: ApplyingEntry, resp: C::R, tx: Option>) { + tracing::debug!(entry = debug(&entry), "send_response"); let tx = match tx { None => return, Some(x) => x, }; - let membership = entry.get_membership().cloned(); + let membership = entry.membership; let res = Ok(ClientWriteResponse { - log_id: *entry.get_log_id(), + log_id: entry.log_id, data: resp, membership, }); @@ -860,7 +920,7 @@ impl, S: RaftStorage> RaftCore, - ) -> ReplicationHandle { + ) -> ReplicationHandle { // Safe unwrap(): target must be in membership let target_node = self.engine.state.membership_state.effective().get_node(&target).unwrap(); @@ -869,14 +929,14 @@ impl, S: RaftStorage> RaftCore::spawn( + ReplicationCore::::spawn( target, session_id, self.config.clone(), self.engine.state.committed().copied(), progress_entry.matching, network, - self.storage.get_log_reader().await, + self.log_store.get_log_reader().await, self.tx_api.clone(), tracing::span!(parent: &self.span, Level::DEBUG, "replication", id=display(self.id), target=display(target)), ) @@ -911,9 +971,7 @@ impl, S: RaftStorage> RaftCore, S: RaftStorage> RaftCore { #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn run_engine_commands(&mut self) -> Result<(), StorageError> { if tracing::enabled!(Level::DEBUG) { @@ -938,7 +996,7 @@ impl, S: RaftStorage> RaftCore, &str> = { + let msg_res: Result, &str> = { let recv = self.rx_api.recv(); pin_mut!(recv); @@ -1059,7 +1117,7 @@ impl, S: RaftStorage> RaftCore) -> Result<(), Fatal> { + pub(crate) async fn handle_api_msg(&mut self, msg: RaftMsg) -> Result<(), Fatal> { tracing::debug!("recv from rx_api: {}", msg.summary()); match msg { @@ -1113,7 +1171,7 @@ impl, S: RaftStorage> RaftCore { - req(&self.engine.state, &mut self.storage, &mut self.network); + req(&self.engine.state, &mut self.log_store, &mut self.network); } 
RaftMsg::ExternalCommand { cmd } => { match cmd { @@ -1353,7 +1411,13 @@ impl, S: RaftStorage> RaftCore, S: RaftStorage> RaftRuntime for RaftCore { +impl RaftRuntime for RaftCore +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ async fn run_command<'e>( &mut self, cmd: Command, @@ -1377,24 +1441,26 @@ impl, S: RaftStorage> RaftRuntime self.leader_data = None; } Command::AppendEntry { entry } => { + let log_id = *entry.get_log_id(); tracing::debug!("AppendEntry: {}", &entry); - self.storage.append_to_log([entry]).await? + self.append_to_log([entry], log_id).await? } Command::AppendInputEntries { entries } => { + let last_log_id = *entries.last().unwrap().get_log_id(); tracing::debug!("AppendInputEntries: {}", DisplaySlice::<_>(&entries),); - self.storage.append_to_log(entries).await? + self.append_to_log(entries, last_log_id).await? } Command::AppendBlankLog { log_id } => { let ent = C::Entry::new_blank(log_id); let entries = [ent]; - self.storage.append_to_log(entries).await? + self.append_to_log(entries, log_id).await? } Command::SaveVote { vote } => { - self.storage.save_vote(&vote).await?; + self.log_store.save_vote(&vote).await?; } - Command::PurgeLog { upto } => self.storage.purge_logs_upto(upto).await?, + Command::PurgeLog { upto } => self.log_store.purge(upto).await?, Command::DeleteConflictLog { since } => { - self.storage.delete_conflict_logs_since(since).await?; + self.log_store.truncate(since).await?; } // TODO(2): Engine initiate a snapshot building Command::BuildSnapshot { .. } => {} @@ -1434,7 +1500,7 @@ impl, S: RaftStorage> RaftRuntime let _ = node.tx_repl.send(Replicate::logs(id, log_id_range)); } Inflight::Snapshot { id, last_log_id } => { - let snapshot = self.storage.get_current_snapshot().await?; + let snapshot = self.state_machine.get_current_snapshot().await?; tracing::debug!("snapshot: {}", snapshot.as_ref().map(|x| &x.meta).summary()); if let Some(snapshot) = snapshot { @@ -1474,7 +1540,7 @@ impl, S: RaftStorage> RaftRuntime let data = self.received_snapshot.remove(&snapshot_meta.snapshot_id).unwrap(); tracing::info!("Start to install_snapshot, meta: {:?}", snapshot_meta); - self.storage.install_snapshot(&snapshot_meta, data).await?; + self.state_machine.install_snapshot(&snapshot_meta, data).await?; tracing::info!("Done install_snapshot, meta: {:?}", snapshot_meta); } Command::Respond { resp: send } => { diff --git a/openraft/src/core/tick.rs b/openraft/src/core/tick.rs index 6c34f0874..8e0ce0ea5 100644 --- a/openraft/src/core/tick.rs +++ b/openraft/src/core/tick.rs @@ -14,20 +14,20 @@ use tracing::Level; use tracing::Span; use crate::raft::RaftMsg; +use crate::storage::RaftLogStorage; use crate::RaftNetworkFactory; -use crate::RaftStorage; use crate::RaftTypeConfig; /// Emit RaftMsg::Tick event at regular `interval`. 
-pub(crate) struct Tick +pub(crate) struct Tick where C: RaftTypeConfig, N: RaftNetworkFactory, - S: RaftStorage, + LS: RaftLogStorage, { interval: Duration, - tx: mpsc::UnboundedSender>, + tx: mpsc::UnboundedSender>, /// Emit event or not enabled: Arc, @@ -38,13 +38,13 @@ pub(crate) struct TickHandle { join_handle: JoinHandle<()>, } -impl Tick +impl Tick where C: RaftTypeConfig, N: RaftNetworkFactory, - S: RaftStorage, + LS: RaftLogStorage, { - pub(crate) fn spawn(interval: Duration, tx: mpsc::UnboundedSender>, enabled: bool) -> TickHandle { + pub(crate) fn spawn(interval: Duration, tx: mpsc::UnboundedSender>, enabled: bool) -> TickHandle { let enabled = Arc::new(AtomicBool::from(enabled)); let this = Self { interval, diff --git a/openraft/src/engine/log_id_list.rs b/openraft/src/engine/log_id_list.rs index e095b9bbb..0065e8ed3 100644 --- a/openraft/src/engine/log_id_list.rs +++ b/openraft/src/engine/log_id_list.rs @@ -1,9 +1,8 @@ use crate::log_id::RaftLogId; -use crate::storage::StorageHelper; +use crate::storage::RaftLogReaderExt; use crate::LogId; use crate::LogIdOptionExt; use crate::NodeId; -use crate::RaftStorage; use crate::RaftTypeConfig; use crate::StorageError; @@ -43,14 +42,14 @@ where NID: NodeId /// A-------B-------C : find(A,B); find(B,C) // both find `B`, need to de-dup /// A-------C-------C : find(A,C) /// ``` - pub(crate) async fn load_log_ids( + pub(crate) async fn load_log_ids( last_purged_log_id: Option>, last_log_id: Option>, - sto: &mut StorageHelper<'_, C, Sto>, + sto: &mut LRX, ) -> Result, StorageError> where C: RaftTypeConfig, - Sto: RaftStorage, + LRX: RaftLogReaderExt, { let mut res = vec![]; diff --git a/openraft/src/raft.rs b/openraft/src/raft.rs index 4b268aa47..71ab32144 100644 --- a/openraft/src/raft.rs +++ b/openraft/src/raft.rs @@ -3,6 +3,7 @@ use std::collections::BTreeMap; use std::fmt::Debug; use std::fmt::Display; +use std::marker::PhantomData; use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; @@ -43,6 +44,8 @@ use crate::metrics::Wait; use crate::node::Node; use crate::replication::ReplicationResult; use crate::replication::ReplicationSessionId; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; use crate::AppData; use crate::AppDataResponse; use crate::ChangeMembers; @@ -52,7 +55,6 @@ use crate::MessageSummary; use crate::NodeId; use crate::RaftNetworkFactory; use crate::RaftState; -use crate::RaftStorage; use crate::SnapshotMeta; use crate::StorageHelper; use crate::Vote; @@ -138,18 +140,21 @@ where NID: NodeId Done(Result<(), Fatal>), } -struct RaftInner, S: RaftStorage> { +struct RaftInner +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, +{ id: C::NodeId, config: Arc, runtime_config: Arc, tick_handle: TickHandle, - tx_api: mpsc::UnboundedSender>, + tx_api: mpsc::UnboundedSender>, rx_metrics: watch::Receiver>, // TODO(xp): it does not need to be a async mutex. #[allow(clippy::type_complexity)] tx_shutdown: Mutex>>, - marker_n: std::marker::PhantomData, - marker_s: std::marker::PhantomData, core_state: Mutex>, } @@ -176,11 +181,24 @@ struct RaftInner, S: RaftStorage> /// application needs to shutdown the Raft node for any reason, calling `shutdown` will do the /// trick. 
#[derive(Clone)] -pub struct Raft, S: RaftStorage> { - inner: Arc>, +pub struct Raft +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ + inner: Arc>, + _p: PhantomData, } -impl, S: RaftStorage> Raft { +impl Raft +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ /// Create and spawn a new Raft task. /// /// ### `id` @@ -200,8 +218,14 @@ impl, S: RaftStorage> Raft, network: N, mut storage: S) -> Result> { + #[tracing::instrument(level="debug", skip_all, fields(cluster=%config.cluster_name))] + pub async fn new( + id: C::NodeId, + config: Arc, + network: N, + mut log_store: LS, + mut state_machine: SM, + ) -> Result> { let (tx_api, rx_api) = mpsc::unbounded_channel(); let (tx_metrics, rx_metrics) = watch::channel(RaftMetrics::new_initial(id)); let (tx_shutdown, rx_shutdown) = oneshot::channel(); @@ -225,12 +249,12 @@ impl, S: RaftStorage> Raft, S: RaftStorage> Raft, S: RaftStorage> Raft, S: RaftStorage> Raft( &self, - mes: RaftMsg, + mes: RaftMsg, rx: oneshot::Receiver>, ) -> Result> where @@ -741,7 +767,10 @@ impl, S: RaftStorage> Raft, &mut S, &mut N) + Send + 'static>(&self, req: F) { + pub fn external_request, &mut LS, &mut N) + Send + 'static>( + &self, + req: F, + ) { let _ignore_error = self.inner.tx_api.send(RaftMsg::ExternalRequest { req: Box::new(req) }); } @@ -813,7 +842,12 @@ pub(crate) type ClientWriteTx = ResultSender, ClientWriteError<::NodeId, ::Node>>; /// A message coming from the Raft API. -pub(crate) enum RaftMsg, S: RaftStorage> { +pub(crate) enum RaftMsg +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, +{ AppendEntries { rpc: AppendEntriesRequest, tx: AppendEntriesTx, @@ -879,7 +913,7 @@ pub(crate) enum RaftMsg, S: RaftStor ExternalRequest { #[allow(clippy::type_complexity)] - req: Box, &mut S, &mut N) + Send + 'static>, + req: Box, &mut LS, &mut N) + Send + 'static>, }, ExternalCommand { @@ -935,11 +969,11 @@ pub(crate) enum RaftMsg, S: RaftStor ReplicationFatal, } -impl MessageSummary> for RaftMsg +impl MessageSummary> for RaftMsg where C: RaftTypeConfig, N: RaftNetworkFactory, - S: RaftStorage, + LS: RaftLogStorage, { fn summary(&self) -> String { match self { diff --git a/openraft/src/replication/mod.rs b/openraft/src/replication/mod.rs index f57895c9e..6652e7056 100644 --- a/openraft/src/replication/mod.rs +++ b/openraft/src/replication/mod.rs @@ -32,6 +32,8 @@ use crate::raft::AppendEntriesResponse; use crate::raft::InstallSnapshotRequest; use crate::raft::RaftMsg; use crate::storage::RaftLogReader; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; use crate::storage::Snapshot; use crate::ErrorSubject; use crate::ErrorVerb; @@ -42,7 +44,6 @@ use crate::NodeId; use crate::RPCTypes; use crate::RaftNetwork; use crate::RaftNetworkFactory; -use crate::RaftStorage; use crate::RaftTypeConfig; use crate::ToStorageResult; @@ -65,7 +66,13 @@ where /// NOTE: we do not stack replication requests to targets because this could result in /// out-of-order delivery. We always buffer until we receive a success response, then send the /// next payload from the buffer. -pub(crate) struct ReplicationCore, S: RaftStorage> { +pub(crate) struct ReplicationCore +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ /// The ID of the target Raft node which replication events are to be sent to. 
target: C::NodeId, @@ -74,16 +81,16 @@ pub(crate) struct ReplicationCore, S /// A channel for sending events to the RaftCore. #[allow(clippy::type_complexity)] - tx_raft_core: mpsc::UnboundedSender>, + tx_raft_core: mpsc::UnboundedSender>, /// A channel for receiving events from the RaftCore. - rx_repl: mpsc::UnboundedReceiver>, + rx_repl: mpsc::UnboundedReceiver>, /// The `RaftNetwork` interface. network: N::Network, /// The `RaftLogReader` of a `RaftStorage` interface. - log_reader: S::LogReader, + log_reader: LS::LogReader, /// The Raft's runtime config. config: Arc, @@ -95,10 +102,16 @@ pub(crate) struct ReplicationCore, S matching: Option>, /// Next replication action to run. - next_action: Option>, + next_action: Option>, } -impl, S: RaftStorage> ReplicationCore { +impl ReplicationCore +where + C: RaftTypeConfig, + N: RaftNetworkFactory, + LS: RaftLogStorage, + SM: RaftStateMachine, +{ /// Spawn a new replication task for the target node. #[tracing::instrument(level = "trace", skip_all,fields(target=display(target), session_id=display(session_id)))] #[allow(clippy::type_complexity)] @@ -110,10 +123,10 @@ impl, S: RaftStorage> Replication committed: Option>, matching: Option>, network: N::Network, - log_reader: S::LogReader, - tx_raft_core: mpsc::UnboundedSender>, + log_reader: LS::LogReader, + tx_raft_core: mpsc::UnboundedSender>, span: tracing::Span, - ) -> ReplicationHandle { + ) -> ReplicationHandle { tracing::debug!( session_id = display(&session_id), target = display(&target), @@ -405,7 +418,7 @@ impl, S: RaftStorage> Replication } #[tracing::instrument(level = "trace", skip_all)] - pub fn process_event(&mut self, event: Replicate) { + pub fn process_event(&mut self, event: Replicate) { tracing::debug!(event=%event.summary(), "process_event"); match event { @@ -433,6 +446,104 @@ impl, S: RaftStorage> Replication } } } + + #[tracing::instrument(level = "trace", skip(self, snapshot))] + async fn stream_snapshot( + &mut self, + id: u64, + mut snapshot: Snapshot, + ) -> Result<(), ReplicationError> { + tracing::debug!(id = display(id), snapshot = debug(&snapshot.meta), "stream_snapshot",); + + let err_x = || (ErrorSubject::Snapshot(snapshot.meta.signature()), ErrorVerb::Read); + + let mut offset = 0; + let end = snapshot.snapshot.seek(SeekFrom::End(0)).await.sto_res(err_x)?; + let mut buf = Vec::with_capacity(self.config.snapshot_max_chunk_size as usize); + + loop { + // Build the RPC. + snapshot.snapshot.seek(SeekFrom::Start(offset)).await.sto_res(err_x)?; + let n_read = snapshot.snapshot.read_buf(&mut buf).await.sto_res(err_x)?; + + let done = (offset + n_read as u64) == end; + let req = InstallSnapshotRequest { + vote: self.session_id.vote, + meta: snapshot.meta.clone(), + offset, + data: Vec::from(&buf[..n_read]), + done, + }; + buf.clear(); + + // Send the RPC over to the target. + tracing::debug!( + snapshot_size = req.data.len(), + req.offset, + end, + req.done, + "sending snapshot chunk" + ); + + let snap_timeout = if done { + self.config.install_snapshot_timeout() + } else { + self.config.send_snapshot_timeout() + }; + + let res = timeout(snap_timeout, self.network.send_install_snapshot(req)).await; + + let res = match res { + Ok(outer_res) => match outer_res { + Ok(res) => res, + Err(err) => { + tracing::warn!(error=%err, "error sending InstallSnapshot RPC to target"); + + // Sleep a short time otherwise in test environment it is a dead-loop that + // never yields. Because network implementation does + // not yield. 
+ sleep(Duration::from_millis(10)).await; + continue; + } + }, + Err(err) => { + tracing::warn!(error=%err, "timeout while sending InstallSnapshot RPC to target"); + + // Sleep a short time otherwise in test environment it is a dead-loop that never + // yields. Because network implementation does not yield. + sleep(Duration::from_millis(10)).await; + continue; + } + }; + + // Handle response conditions. + if res.vote > self.session_id.vote { + return Err(ReplicationError::HigherVote(HigherVote { + higher: res.vote, + mine: self.session_id.vote, + })); + } + + // If we just sent the final chunk of the snapshot, then transition to lagging state. + if done { + tracing::debug!( + "done install snapshot: snapshot last_log_id: {:?}, matching: {}", + snapshot.meta.last_log_id, + self.matching.summary(), + ); + + self.update_matching(id, snapshot.meta.last_log_id); + + return Ok(()); + } + + // Everything is good, so update offset for sending the next chunk. + offset += n_read as u64; + + // Check raft channel to ensure we are staying up-to-date, then loop. + self.try_drain_events().await?; + } + } } /// Request to replicate a chunk of data, logs or snapshot. @@ -577,103 +688,3 @@ where } } } - -impl, S: RaftStorage> ReplicationCore { - #[tracing::instrument(level = "trace", skip(self, snapshot))] - async fn stream_snapshot( - &mut self, - id: u64, - mut snapshot: Snapshot, - ) -> Result<(), ReplicationError> { - tracing::debug!(id = display(id), snapshot = debug(&snapshot.meta), "stream_snapshot",); - - let err_x = || (ErrorSubject::Snapshot(snapshot.meta.signature()), ErrorVerb::Read); - - let mut offset = 0; - let end = snapshot.snapshot.seek(SeekFrom::End(0)).await.sto_res(err_x)?; - let mut buf = Vec::with_capacity(self.config.snapshot_max_chunk_size as usize); - - loop { - // Build the RPC. - snapshot.snapshot.seek(SeekFrom::Start(offset)).await.sto_res(err_x)?; - let n_read = snapshot.snapshot.read_buf(&mut buf).await.sto_res(err_x)?; - - let done = (offset + n_read as u64) == end; - let req = InstallSnapshotRequest { - vote: self.session_id.vote, - meta: snapshot.meta.clone(), - offset, - data: Vec::from(&buf[..n_read]), - done, - }; - buf.clear(); - - // Send the RPC over to the target. - tracing::debug!( - snapshot_size = req.data.len(), - req.offset, - end, - req.done, - "sending snapshot chunk" - ); - - let snap_timeout = if done { - self.config.install_snapshot_timeout() - } else { - self.config.send_snapshot_timeout() - }; - - let res = timeout(snap_timeout, self.network.send_install_snapshot(req)).await; - - let res = match res { - Ok(outer_res) => match outer_res { - Ok(res) => res, - Err(err) => { - tracing::warn!(error=%err, "error sending InstallSnapshot RPC to target"); - - // Sleep a short time otherwise in test environment it is a dead-loop that - // never yields. Because network implementation does - // not yield. - sleep(Duration::from_millis(10)).await; - continue; - } - }, - Err(err) => { - tracing::warn!(error=%err, "timeout while sending InstallSnapshot RPC to target"); - - // Sleep a short time otherwise in test environment it is a dead-loop that never - // yields. Because network implementation does not yield. - sleep(Duration::from_millis(10)).await; - continue; - } - }; - - // Handle response conditions. - if res.vote > self.session_id.vote { - return Err(ReplicationError::HigherVote(HigherVote { - higher: res.vote, - mine: self.session_id.vote, - })); - } - - // If we just sent the final chunk of the snapshot, then transition to lagging state. 
- if done { - tracing::debug!( - "done install snapshot: snapshot last_log_id: {:?}, matching: {}", - snapshot.meta.last_log_id, - self.matching.summary(), - ); - - self.update_matching(id, snapshot.meta.last_log_id); - - return Ok(()); - } - - // Everything is good, so update offset for sending the next chunk. - offset += n_read as u64; - - // Check raft channel to ensure we are staying up-to-date, then loop. - self.try_drain_events().await?; - } - } -} diff --git a/openraft/src/storage/adapter.rs b/openraft/src/storage/adapter.rs new file mode 100644 index 000000000..709aa46e4 --- /dev/null +++ b/openraft/src/storage/adapter.rs @@ -0,0 +1,207 @@ +use std::fmt::Debug; +use std::marker::PhantomData; +use std::ops::DerefMut; +use std::ops::RangeBounds; +use std::sync::Arc; + +use async_trait::async_trait; +use tokio::sync::RwLock; +use tokio::sync::RwLockReadGuard; +use tokio::sync::RwLockWriteGuard; + +use crate::storage::v2::sealed::Sealed; +use crate::storage::LogFlushed; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; +use crate::LogId; +use crate::LogState; +use crate::RaftLogReader; +use crate::RaftStorage; +use crate::RaftTypeConfig; +use crate::Snapshot; +use crate::SnapshotMeta; +use crate::StorageError; +use crate::StoredMembership; +use crate::Vote; + +/// An adapter that allows an implementation of [`RaftStorage`] to be used in the latest framework. +/// +/// It hide an implementation of [`RaftStorage`] behind a RWLock. +/// Therefore, it provides full functionalities but without any parallelism. +/// +/// `Adaptor` implements both [`RaftLogStorage`] and [`RaftStateMachine`], +/// and just pass the calls to the underlying [`RaftStorage`]. +/// +/// To use the old [`RaftStorage`] implementation in the latest framework: +/// ```ignore +/// # use std::sync::Arc; +/// # use openraft::{Config, Raft}; +/// # use openraft::storage::Adaptor; +/// +/// let store = MyRaftStorage::new(); +/// let (log_store, state_machine) = Adaptor::new(store); +/// Raft::new(1, Arc::new(Config::default()), MyNetwork::default(), log_store, state_machine); +/// ``` +#[derive(Debug, Clone)] +pub struct Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ + storage: Arc>, + _phantom: PhantomData, +} + +impl Default for Adaptor +where + C: RaftTypeConfig, + S: RaftStorage + Default, +{ + fn default() -> Self { + Self::create(Arc::new(RwLock::new(S::default()))) + } +} + +impl Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ + /// Create a [`RaftLogStorage`] and a [`RaftStateMachine`] upon an implementation of + /// [`RaftStorage`]. + pub fn new(store: S) -> (Self, Self) { + let s = Arc::new(RwLock::new(store)); + + let log_store = Adaptor::create(s.clone()); + let state_machine = Adaptor::create(s); + + (log_store, state_machine) + } + + fn create(storage: Arc>) -> Self { + Self { + storage, + _phantom: PhantomData::default(), + } + } + + // TODO(1): make it private because only tests need it. + // rewrite memstore with separated log-store and state machine. + /// Get a write lock of the underlying storage. + pub async fn storage_mut(&self) -> RwLockWriteGuard { + self.storage.write().await + } + + /// Get a read lock of the underlying storage. 
+ pub async fn storage(&self) -> RwLockReadGuard { + self.storage.read().await + } +} + +#[async_trait] +impl RaftLogReader for Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ + async fn get_log_state(&mut self) -> Result, StorageError> { + S::get_log_state(self.storage_mut().await.deref_mut()).await + } + + async fn try_get_log_entries + Clone + Debug + Send + Sync>( + &mut self, + range: RB, + ) -> Result, StorageError> { + S::try_get_log_entries(self.storage_mut().await.deref_mut(), range).await + } +} + +impl Sealed for Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ +} + +#[async_trait] +impl RaftLogStorage for Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ + type LogReader = S::LogReader; + + async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError> { + S::save_vote(self.storage_mut().await.deref_mut(), vote).await + } + + async fn read_vote(&mut self) -> Result>, StorageError> { + S::read_vote(self.storage_mut().await.deref_mut()).await + } + + async fn get_log_reader(&mut self) -> Self::LogReader { + S::get_log_reader(self.storage_mut().await.deref_mut()).await + } + + async fn append(&mut self, entries: I, callback: LogFlushed) -> Result<(), StorageError> + where I: IntoIterator + Send { + // Default implementation that calls the flush-before-return `append_to_log`. + + S::append_to_log(self.storage_mut().await.deref_mut(), entries).await?; + callback.log_io_completed(Ok(())); + + Ok(()) + } + + async fn truncate(&mut self, log_id: LogId) -> Result<(), StorageError> { + S::delete_conflict_logs_since(self.storage_mut().await.deref_mut(), log_id).await + } + + async fn purge(&mut self, log_id: LogId) -> Result<(), StorageError> { + S::purge_logs_upto(self.storage_mut().await.deref_mut(), log_id).await + } +} + +#[async_trait] +impl RaftStateMachine for Adaptor +where + C: RaftTypeConfig, + S: RaftStorage, +{ + type SnapshotData = S::SnapshotData; + type SnapshotBuilder = S::SnapshotBuilder; + + async fn applied_state( + &mut self, + ) -> Result<(Option>, StoredMembership), StorageError> { + S::last_applied_state(self.storage_mut().await.deref_mut()).await + } + + async fn apply(&mut self, entries: I) -> Result, StorageError> + where I: IntoIterator + Send { + let entries = entries.into_iter().collect::>(); + S::apply_to_state_machine(self.storage_mut().await.deref_mut(), &entries).await + } + + async fn get_snapshot_builder(&mut self) -> Self::SnapshotBuilder { + S::get_snapshot_builder(self.storage_mut().await.deref_mut()).await + } + + async fn begin_receiving_snapshot(&mut self) -> Result, StorageError> { + S::begin_receiving_snapshot(self.storage_mut().await.deref_mut()).await + } + + async fn install_snapshot( + &mut self, + meta: &SnapshotMeta, + snapshot: Box, + ) -> Result<(), StorageError> { + S::install_snapshot(self.storage_mut().await.deref_mut(), meta, snapshot).await + } + + async fn get_current_snapshot( + &mut self, + ) -> Result>, StorageError> { + S::get_current_snapshot(self.storage_mut().await.deref_mut()).await + } +} diff --git a/openraft/src/storage/callback.rs b/openraft/src/storage/callback.rs new file mode 100644 index 000000000..a0bda0895 --- /dev/null +++ b/openraft/src/storage/callback.rs @@ -0,0 +1,92 @@ +//! Callbacks used by Storage API + +use std::io; + +use tokio::sync::oneshot; + +use crate::display_ext::DisplayOption; +use crate::LogId; +use crate::NodeId; +use crate::RaftTypeConfig; +use crate::StorageIOError; + +/// A oneshot callback for completion of log io operation. 
+pub struct LogFlushed +where NID: NodeId +{ + last_log_id: Option>, + tx: oneshot::Sender>, io::Error>>, +} + +impl LogFlushed +where NID: NodeId +{ + pub(crate) fn new( + last_log_id: Option>, + tx: oneshot::Sender>, io::Error>>, + ) -> Self { + Self { last_log_id, tx } + } + + /// Report log io completion event. + /// + /// It will be called when the log is successfully appended to the storage or an error occurs. + pub fn log_io_completed(self, result: Result<(), io::Error>) { + let res = if let Err(e) = result { + tracing::error!( + "LogFlush error: {}, while flushing upto {}", + e, + DisplayOption(&self.last_log_id) + ); + self.tx.send(Err(e)) + } else { + self.tx.send(Ok(self.last_log_id)) + }; + + if let Err(e) = res { + tracing::error!("failed to send log io completion event: {:?}", e); + } + } +} + +/// A oneshot callback for completion of applying logs to state machine. +pub struct LogApplied +where C: RaftTypeConfig +{ + last_log_id: LogId, + tx: oneshot::Sender, Vec), StorageIOError>>, +} + +impl LogApplied +where C: RaftTypeConfig +{ + #[allow(dead_code)] + pub(crate) fn new( + last_log_id: LogId, + tx: oneshot::Sender, Vec), StorageIOError>>, + ) -> Self { + Self { last_log_id, tx } + } + + /// Report apply io completion event. + /// + /// It will be called when the log is successfully applied to the state machine or an error + /// occurs. + pub fn completed(self, result: Result, StorageIOError>) { + let res = match result { + Ok(x) => { + tracing::debug!("LogApplied upto {}", self.last_log_id); + let resp = (self.last_log_id, x); + self.tx.send(Ok(resp)) + } + Err(e) => { + tracing::error!("LogApplied error: {}, while applying upto {}", e, self.last_log_id); + self.tx.send(Err(e)) + } + }; + + if let Err(_e) = res { + tracing::error!("failed to send apply complete event, last_log_id: {}", self.last_log_id); + } + } +} diff --git a/openraft/src/storage/helper.rs b/openraft/src/storage/helper.rs index c8db71fd1..fe47de630 100644 --- a/openraft/src/storage/helper.rs +++ b/openraft/src/storage/helper.rs @@ -1,45 +1,47 @@ -use std::fmt::Debug; use std::marker::PhantomData; -use std::ops::RangeBounds; use std::sync::Arc; use tokio::time::Instant; -use crate::defensive::check_range_matches_entries; use crate::engine::LogIdList; use crate::entry::RaftPayload; use crate::log_id::RaftLogId; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; use crate::utime::UTime; use crate::EffectiveMembership; -use crate::LogId; use crate::LogIdOptionExt; use crate::MembershipState; use crate::RaftState; -use crate::RaftStorage; use crate::RaftTypeConfig; use crate::StorageError; use crate::StoredMembership; -/// StorageHelper provides additional methods to access a [`RaftStorage`] implementation. -pub struct StorageHelper<'a, C, Sto> +/// StorageHelper provides additional methods to access a [`RaftLogStorage`] and +/// [`RaftStateMachine`] implementation. +pub struct StorageHelper<'a, C, LS, SM> where C: RaftTypeConfig, - Sto: RaftStorage, + LS: RaftLogStorage, + SM: RaftStateMachine, { - pub(crate) sto: &'a mut Sto, + pub(crate) log_store: &'a mut LS, + pub(crate) state_machine: &'a mut SM, _p: PhantomData, } -impl<'a, C, Sto> StorageHelper<'a, C, Sto> +impl<'a, C, LS, SM> StorageHelper<'a, C, LS, SM> where C: RaftTypeConfig, - Sto: RaftStorage, + LS: RaftLogStorage, + SM: RaftStateMachine, { /// Creates a new `StorageHelper` that provides additional functions based on the underlying - /// [`RaftStorage`] implementation. 
- pub fn new(sto: &'a mut Sto) -> Self { + /// [`RaftLogStorage`] and [`RaftStateMachine`] implementation. + pub fn new(sto: &'a mut LS, sm: &'a mut SM) -> Self { Self { - sto, + log_store: sto, + state_machine: sm, _p: Default::default(), } } @@ -53,23 +55,23 @@ where /// When the Raft node is first started, it will call this interface to fetch the last known /// state from stable storage. pub async fn get_initial_state(&mut self) -> Result, StorageError> { - let vote = self.sto.read_vote().await?; - let st = self.sto.get_log_state().await?; + let vote = self.log_store.read_vote().await?; + let st = self.log_store.get_log_state().await?; let mut last_purged_log_id = st.last_purged_log_id; let mut last_log_id = st.last_log_id; - let (last_applied, _) = self.sto.last_applied_state().await?; + let (last_applied, _) = self.state_machine.applied_state().await?; let mem_state = self.get_membership().await?; // Clean up dirty state: snapshot is installed but logs are not cleaned. if last_log_id < last_applied { - self.sto.purge_logs_upto(last_applied.unwrap()).await?; + self.log_store.purge(last_applied.unwrap()).await?; last_log_id = last_applied; last_purged_log_id = last_applied; } - let log_ids = LogIdList::load_log_ids(last_purged_log_id, last_log_id, self).await?; + let log_ids = LogIdList::load_log_ids(last_purged_log_id, last_log_id, self.log_store).await?; - let snapshot_meta = self.sto.get_current_snapshot().await?.map(|x| x.meta).unwrap_or_default(); + let snapshot_meta = self.state_machine.get_current_snapshot().await?.map(|x| x.meta).unwrap_or_default(); let now = Instant::now(); @@ -90,19 +92,6 @@ where }) } - /// Get the log id of the entry at `index`. - pub async fn get_log_id(&mut self, log_index: u64) -> Result, StorageError> { - let st = self.sto.get_log_state().await?; - - if Some(log_index) == st.last_purged_log_id.index() { - return Ok(st.last_purged_log_id.unwrap()); - } - - let entries = self.get_log_entries(log_index..=log_index).await?; - - Ok(*entries[0].get_log_id()) - } - /// Returns the last 2 membership config found in log or state machine. /// /// A raft node needs to store at most 2 membership config log: @@ -120,7 +109,7 @@ where /// /// Thus a raft node will only need to store at most two recent membership logs. pub async fn get_membership(&mut self) -> Result, StorageError> { - let (_, sm_mem) = self.sto.last_applied_state().await?; + let (_, sm_mem) = self.state_machine.applied_state().await?; let sm_mem_next_index = sm_mem.log_id().next_index(); @@ -154,12 +143,12 @@ where /// This method returns at most membership logs with greatest log index which is /// `>=since_index`. If no such membership log is found, it returns `None`, e.g., when logs /// are cleaned after being applied. 
- #[tracing::instrument(level = "trace", skip(self))] + #[tracing::instrument(level = "trace", skip_all)] pub async fn last_membership_in_log( &mut self, since_index: u64, ) -> Result>, StorageError> { - let st = self.sto.get_log_state().await?; + let st = self.log_store.get_log_state().await?; let mut end = st.last_log_id.next_index(); let start = std::cmp::max(st.last_purged_log_id.next_index(), since_index); @@ -169,7 +158,7 @@ where while start < end { let step_start = std::cmp::max(start, end.saturating_sub(step)); - let entries = self.sto.try_get_log_entries(step_start..end).await?; + let entries = self.log_store.try_get_log_entries(step_start..end).await?; for ent in entries.iter().rev() { if let Some(mem) = ent.get_membership() { @@ -186,27 +175,4 @@ where Ok(res) } - - /// Try to get an log entry. - /// - /// It does not return an error if the log entry at `log_index` is not found. - pub async fn try_get_log_entry(&mut self, log_index: u64) -> Result, StorageError> { - let mut res = self.sto.try_get_log_entries(log_index..(log_index + 1)).await?; - Ok(res.pop()) - } - - /// Get a series of log entries from storage. - /// - /// Similar to `try_get_log_entries` except an error will be returned if there is an entry not - /// found in the specified range. - pub async fn get_log_entries + Clone + Debug + Send + Sync>( - &mut self, - range: RB, - ) -> Result, StorageError> { - let res = self.sto.try_get_log_entries(range.clone()).await?; - - check_range_matches_entries::(range, &res)?; - - Ok(res) - } } diff --git a/openraft/src/storage/log_store_ext.rs b/openraft/src/storage/log_store_ext.rs new file mode 100644 index 000000000..e7a5e1b0a --- /dev/null +++ b/openraft/src/storage/log_store_ext.rs @@ -0,0 +1,60 @@ +use std::fmt::Debug; +use std::ops::RangeBounds; + +use async_trait::async_trait; + +use crate::defensive::check_range_matches_entries; +use crate::LogId; +use crate::LogIdOptionExt; +use crate::RaftLogId; +use crate::RaftLogReader; +use crate::RaftTypeConfig; +use crate::StorageError; + +#[async_trait] +pub trait RaftLogReaderExt: RaftLogReader +where C: RaftTypeConfig +{ + /// Try to get an log entry. + /// + /// It does not return an error if the log entry at `log_index` is not found. + async fn try_get_log_entry(&mut self, log_index: u64) -> Result, StorageError> { + let mut res = self.try_get_log_entries(log_index..(log_index + 1)).await?; + Ok(res.pop()) + } + + /// Get a series of log entries from storage. + /// + /// Similar to `try_get_log_entries` except an error will be returned if there is an entry not + /// found in the specified range. + async fn get_log_entries + Clone + Debug + Send + Sync>( + &mut self, + range: RB, + ) -> Result, StorageError> { + let res = self.try_get_log_entries(range.clone()).await?; + + check_range_matches_entries::(range, &res)?; + + Ok(res) + } + + /// Get the log id of the entry at `index`. + async fn get_log_id(&mut self, log_index: u64) -> Result, StorageError> { + let st = self.get_log_state().await?; + + if Some(log_index) == st.last_purged_log_id.index() { + return Ok(st.last_purged_log_id.unwrap()); + } + + let entries = self.get_log_entries(log_index..=log_index).await?; + + Ok(*entries[0].get_log_id()) + } +} + +impl RaftLogReaderExt for LR +where + C: RaftTypeConfig, + LR: RaftLogReader, +{ +} diff --git a/openraft/src/storage/mod.rs b/openraft/src/storage/mod.rs index 046038d3b..2a99e5895 100644 --- a/openraft/src/storage/mod.rs +++ b/openraft/src/storage/mod.rs @@ -1,19 +1,30 @@ //! 
The Raft storage interface and data types. +pub(crate) mod adapter; +mod callback; mod helper; +mod log_store_ext; mod snapshot_signature; +mod v2; + use std::fmt::Debug; use std::ops::RangeBounds; +pub use adapter::Adaptor; use async_trait::async_trait; pub use helper::StorageHelper; +pub use log_store_ext::RaftLogReaderExt; pub use snapshot_signature::SnapshotSignature; use tokio::io::AsyncRead; use tokio::io::AsyncSeek; use tokio::io::AsyncWrite; +pub use v2::RaftLogStorage; +pub use v2::RaftStateMachine; use crate::node::Node; use crate::raft_types::SnapshotId; +pub use crate::storage::callback::LogApplied; +pub use crate::storage::callback::LogFlushed; use crate::LogId; use crate::MessageSummary; use crate::NodeId; @@ -260,12 +271,6 @@ where C: RaftTypeConfig &mut self, ) -> Result<(Option>, StoredMembership), StorageError>; - // TODO: The reply should happen asynchronously, somehow. Make this method synchronous and - // instead of using the result, pass a channel where to post the completion. The Raft core can - // then collect completions on this channel and update the client with the result once all - // the preceding operations have been applied to the state machine. This way we'll reach - // operation pipelining w/o the need to wait for the completion of each operation inline. - // --- /// Apply the given payload of entries to the state machine. /// /// The Raft protocol guarantees that only logs which have been _committed_, that is, logs which diff --git a/openraft/src/storage/v2.rs b/openraft/src/storage/v2.rs new file mode 100644 index 000000000..7937ef234 --- /dev/null +++ b/openraft/src/storage/v2.rs @@ -0,0 +1,123 @@ +//! Defines [`RaftLogStorage`] and [`RaftStateMachine`] trait to replace the previous +//! [`RaftStorage`](`crate::storage::RaftStorage`). [`RaftLogStorage`] is responsible for storing +//! logs, and [`RaftStateMachine`] is responsible for storing state machine and snapshot. + +use async_trait::async_trait; +use tokio::io::AsyncRead; +use tokio::io::AsyncSeek; +use tokio::io::AsyncWrite; + +use crate::storage::callback::LogFlushed; +use crate::storage::v2::sealed::Sealed; +use crate::LogId; +use crate::RaftLogReader; +use crate::RaftSnapshotBuilder; +use crate::RaftTypeConfig; +use crate::Snapshot; +use crate::SnapshotMeta; +use crate::StorageError; +use crate::StoredMembership; +use crate::Vote; + +pub(crate) mod sealed { + /// Seal [`RaftLogStorage`] and [`RaftStateMachine`]. This is to prevent users from implementing + /// them before being stable. + pub trait Sealed {} +} + +#[async_trait] +pub trait RaftLogStorage: Sealed + RaftLogReader + Send + Sync + 'static +where C: RaftTypeConfig +{ + type LogReader: RaftLogReader; + + async fn save_vote(&mut self, vote: &Vote) -> Result<(), StorageError>; + + async fn read_vote(&mut self) -> Result>, StorageError>; + + async fn get_log_reader(&mut self) -> Self::LogReader; + + /// Append log entries and call the `callback` once logs are persisted on disk. + /// + /// It should returns immediately after saving the input log entries in memory, and calls the + /// `callback` when the entries are persisted on disk, i.e., avoid blocking. + /// + /// This method is still async because preparing preparing the IO is usually async. + /// + /// To ensure correctness: + /// + /// - When this method returns, the entries must be readable, i.e., a `LogReader` can read these + /// entries. + /// + /// - When the `callback` is called, the entries must be persisted on disk. 
+ /// + /// NOTE that: the `callback` can be called either before or after this method returns. + /// + /// - There must not be a **hole** in logs. Because Raft only examine the last log id to ensure + /// correctness. + async fn append(&mut self, entries: I, callback: LogFlushed) -> Result<(), StorageError> + where I: IntoIterator + Send; + + /// Truncate logs since `log_id`, inclusive + async fn truncate(&mut self, log_id: LogId) -> Result<(), StorageError>; + + /// Purge logs upto `log_id`, inclusive + async fn purge(&mut self, log_id: LogId) -> Result<(), StorageError>; +} + +#[async_trait] +pub trait RaftStateMachine: Sealed + Send + Sync + 'static +where C: RaftTypeConfig +{ + type SnapshotData: AsyncRead + AsyncWrite + AsyncSeek + Send + Sync + Unpin + 'static; + + type SnapshotBuilder: RaftSnapshotBuilder; + + async fn applied_state( + &mut self, + ) -> Result<(Option>, StoredMembership), StorageError>; + + /// Apply the given payload of entries to the state machine. + /// + /// The Raft protocol guarantees that only logs which have been _committed_, that is, logs which + /// have been replicated to a quorum of the cluster, will be applied to the state machine. + /// + /// This is where the business logic of interacting with your application's state machine + /// should live. This is 100% application specific. Perhaps this is where an application + /// specific transaction is being started, or perhaps committed. This may be where a key/value + /// is being stored. + /// + /// For every entry to apply, an implementation should: + /// - Store the log id as last applied log id. + /// - Deal with the business logic log. + /// - Store membership config if `RaftEntry::get_membership()` returns `Some`. + /// + /// Note that for a membership log, the implementation need to do nothing about it, except + /// storing it. + /// + /// An implementation may choose to persist either the state machine or the snapshot: + /// + /// - An implementation with persistent state machine: persists the state on disk before + /// returning from `apply_to_state_machine()`. So that a snapshot does not need to be + /// persistent. + /// + /// - An implementation with persistent snapshot: `apply_to_state_machine()` does not have to + /// persist state on disk. But every snapshot has to be persistent. And when starting up the + /// application, the state machine should be rebuilt from the last snapshot. 
+ async fn apply(&mut self, entries: I) -> Result, StorageError> + where I: IntoIterator + Send; + + async fn get_snapshot_builder(&mut self) -> Self::SnapshotBuilder; + + async fn begin_receiving_snapshot(&mut self) -> Result, StorageError>; + + async fn install_snapshot( + &mut self, + meta: &SnapshotMeta, + snapshot: Box, + ) -> Result<(), StorageError>; + + async fn get_current_snapshot( + &mut self, + ) -> Result>, StorageError>; +} diff --git a/openraft/src/testing/mod.rs b/openraft/src/testing/mod.rs index 818195ef7..96f03c25e 100644 --- a/openraft/src/testing/mod.rs +++ b/openraft/src/testing/mod.rs @@ -1,12 +1,20 @@ mod store_builder; mod suite; +use anyerror::AnyError; pub use store_builder::StoreBuilder; pub use suite::Suite; +use tokio::sync::oneshot; +use crate::log_id::RaftLogId; +use crate::storage::LogFlushed; +use crate::storage::RaftLogStorage; use crate::BasicNode; use crate::CommittedLeaderId; use crate::LogId; +use crate::RaftTypeConfig; +use crate::StorageError; +use crate::StorageIOError; crate::declare_raft_types!( /// Dummy Raft types for the purpose of testing internal structures requiring @@ -21,3 +29,22 @@ pub fn log_id(term: u64, index: u64) -> LogId { index, } } + +/// Append to log and wait for the log to be flushed. +pub async fn blocking_append, I>( + log_store: &mut LS, + entries: I, +) -> Result<(), StorageError> +where + I: IntoIterator, +{ + let entries = entries.into_iter().collect::>(); + let last_log_id = entries.last().map(|e| *e.get_log_id()).unwrap(); + + let (tx, rx) = oneshot::channel(); + let cb = LogFlushed::new(Some(last_log_id), tx); + log_store.append(entries, cb).await?; + rx.await.unwrap().map_err(|e| StorageIOError::write_logs(AnyError::error(e)))?; + + Ok(()) +} diff --git a/openraft/src/testing/store_builder.rs b/openraft/src/testing/store_builder.rs index c178782f9..f4aeaf088 100644 --- a/openraft/src/testing/store_builder.rs +++ b/openraft/src/testing/store_builder.rs @@ -1,23 +1,27 @@ use async_trait::async_trait; -use crate::RaftStorage; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; use crate::RaftTypeConfig; use crate::StorageError; -/// The trait to build a [`RaftStorage`] implementation. +/// The trait to build a [`RaftLogStorage`] and [`RaftStateMachine`] implementation. /// -/// The generic parameter `C` is type config for a `RaftStorage` implementation, -/// `S` is the type that implements `RaftStorage`, +/// The generic parameter `C` is type config for a `RaftLogStorage` and `RaftStateMachine` +/// implementation, +/// `LS` is the type that implements `RaftLogStorage`, +/// `SM` is the type that implements `RaftStateMachine`, /// and `G` is a guard type that cleanup resource when being dropped. /// /// By default `G` is a trivial guard `()`. To test a store that is backed by a folder on disk, `G` /// could be the dropper of the temp-dir that stores data. 
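The callback contract described above (entries must be readable when `append` returns, and `LogFlushed` fires once they are durable; both `blocking_append` and `Adaptor::append` follow it) can be illustrated with a short sketch. This is not part of the patch: `buffer_entries` and `flush_wal` are hypothetical helpers, and because `RaftLogStorage` is sealed in this commit the snippet only sketches the expected shape of an `append` implementation rather than something an application can write today.

```rust
// Sketch only, assuming a store with hypothetical `buffer_entries` / `flush_wal` helpers.
async fn append<I>(
    &mut self,
    entries: I,
    callback: LogFlushed<C::NodeId>,
) -> Result<(), StorageError<C::NodeId>>
where
    I: IntoIterator<Item = C::Entry> + Send,
{
    // Entries must be readable by a `LogReader` as soon as this method returns.
    self.buffer_entries(entries);

    // Persist the buffered entries. This work may also be moved onto a background
    // task, since the callback is allowed to fire either before or after `append`
    // returns.
    let io_result = self.flush_wal().await;

    // Signal flush completion; `RaftCore::append_to_log` awaits this signal before
    // treating the log IO as finished.
    callback.log_io_completed(io_result);

    Ok(())
}
```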
#[async_trait] -pub trait StoreBuilder: Send + Sync +pub trait StoreBuilder: Send + Sync where C: RaftTypeConfig, - S: RaftStorage, + LS: RaftLogStorage, + SM: RaftStateMachine, { - /// Build a [`RaftStorage`] implementation - async fn build(&self) -> Result<(G, S), StorageError>; + /// Build a [`RaftLogStorage`] implementation + async fn build(&self) -> Result<(G, LS, SM), StorageError>; } diff --git a/openraft/src/testing/suite.rs b/openraft/src/testing/suite.rs index 09d1c008d..7dba598fc 100644 --- a/openraft/src/testing/suite.rs +++ b/openraft/src/testing/suite.rs @@ -4,14 +4,20 @@ use std::future::Future; use std::marker::PhantomData; use std::option::Option::None; +use anyerror::AnyError; use maplit::btreeset; +use tokio::sync::oneshot; use crate::entry::RaftEntry; use crate::log_id::RaftLogId; use crate::membership::EffectiveMembership; use crate::raft_state::LogStateReader; use crate::raft_state::RaftState; +use crate::storage::LogFlushed; use crate::storage::LogState; +use crate::storage::RaftLogReaderExt; +use crate::storage::RaftLogStorage; +use crate::storage::RaftStateMachine; use crate::storage::StorageHelper; use crate::testing::StoreBuilder; use crate::vote::CommittedLeaderId; @@ -21,9 +27,9 @@ use crate::LogId; use crate::Membership; use crate::NodeId; use crate::RaftSnapshotBuilder; -use crate::RaftStorage; use crate::RaftTypeConfig; use crate::StorageError; +use crate::StorageIOError; use crate::StoredMembership; use crate::Vote; @@ -40,31 +46,29 @@ macro_rules! btreeset { } /// Test suite to ensure a `RaftStore` impl works as expected. -/// -/// Usage: -pub struct Suite +pub struct Suite where C: RaftTypeConfig, C::D: AppData + Debug, C::R: AppDataResponse + Debug, - S: RaftStorage, - B: StoreBuilder, + LS: RaftLogStorage, + SM: RaftStateMachine, + B: StoreBuilder, G: Send + Sync, { - c: PhantomData, - p: PhantomData, - f: PhantomData, - g: PhantomData, + _p: PhantomData<(C, LS, SM, B, G)>, } -impl Suite +#[allow(unused)] +impl Suite where C: RaftTypeConfig, C::D: AppData + Debug, C::R: AppDataResponse + Debug, C::NodeId: From, - S: RaftStorage, - B: StoreBuilder, + LS: RaftLogStorage, + SM: RaftStateMachine, + B: StoreBuilder, G: Send + Sync, { pub fn test_all(builder: B) -> Result<(), StorageError> { @@ -110,55 +114,56 @@ where Ok(()) } - pub async fn last_membership_in_log_initial(mut store: S) -> Result<(), StorageError> { - let membership = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + pub async fn last_membership_in_log_initial(mut store: LS, mut sm: SM) -> Result<(), StorageError> { + let membership = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert!(membership.is_empty()); Ok(()) } - pub async fn last_membership_in_log(mut store: S) -> Result<(), StorageError> { + pub async fn last_membership_in_log(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- no log, do not read membership from state machine"); { - store - .apply_to_state_machine(&[blank_ent::(1, 1), membership_ent::(1, 1, btreeset! {3,4,5})]) - .await?; + apply(&mut sm, [ + blank_ent::(1, 1), + membership_ent::(1, 1, btreeset! {3,4,5}), + ]) + .await?; - let mem = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + let mem = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert!(mem.is_empty()); } tracing::info!("--- membership presents in log, smaller than last_applied, read from log"); { - store.append_to_log([membership_ent::(1, 1, btreeset! 
{1,2,3})]).await?; + append(&mut store, [membership_ent::(1, 1, btreeset! {1,2,3})]).await?; - let mem = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + let mem = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert_eq!(1, mem.len()); let mem = mem[0].clone(); assert_eq!(&Membership::new(vec![btreeset! {1, 2, 3}], None), mem.membership(),); - let mem = StorageHelper::new(&mut store).last_membership_in_log(1).await?; + let mem = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(1).await?; assert_eq!(1, mem.len()); let mem = mem[0].clone(); assert_eq!(&Membership::new(vec![btreeset! {1, 2, 3}], None), mem.membership(),); - let mem = StorageHelper::new(&mut store).last_membership_in_log(2).await?; + let mem = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(2).await?; assert!(mem.is_empty()); } tracing::info!("--- membership presents in log and > sm.last_applied, read 2 membership entries from log"); { - store - .append_to_log([ - blank_ent::(1, 2), - membership_ent::(1, 3, btreeset! {7,8,9}), - blank_ent::(1, 4), - ]) - .await?; - - let mems = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + append(&mut store, [ + blank_ent::(1, 2), + membership_ent::(1, 3, btreeset! {7,8,9}), + blank_ent::(1, 4), + ]) + .await?; + + let mems = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert_eq!(2, mems.len()); let mem = mems[0].clone(); @@ -170,15 +175,15 @@ where tracing::info!("--- membership presents in log and > sm.last_applied, read from log but since_index is greater than the last"); { - let mem = StorageHelper::new(&mut store).last_membership_in_log(4).await?; + let mem = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(4).await?; assert!(mem.is_empty()); } tracing::info!("--- 3 memberships in log, only return the last 2 of them"); { - store.append_to_log([membership_ent::(1, 5, btreeset! {10,11})]).await?; + append(&mut store, [membership_ent::(1, 5, btreeset! {10,11})]).await?; - let mems = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + let mems = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert_eq!(2, mems.len()); let mem = mems[0].clone(); @@ -191,24 +196,23 @@ where Ok(()) } - pub async fn last_membership_in_log_multi_step(mut store: S) -> Result<(), StorageError> { + pub async fn last_membership_in_log_multi_step(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- find membership log entry backwards, multiple steps"); { - store - .append_to_log([ - // - membership_ent::(1, 1, btreeset! {1,2,3}), - membership_ent::(1, 2, btreeset! {3,4,5}), - ]) - .await?; + append(&mut store, [ + // + membership_ent::(1, 1, btreeset! {1,2,3}), + membership_ent::(1, 2, btreeset! {3,4,5}), + ]) + .await?; for i in 3..100 { - store.append_to_log([blank_ent::(1, i)]).await?; + append(&mut store, [blank_ent::(1, i)]).await?; } - store.append_to_log([membership_ent::(1, 100, btreeset! {5,6,7})]).await?; + append(&mut store, [membership_ent::(1, 100, btreeset! {5,6,7})]).await?; - let mems = StorageHelper::new(&mut store).last_membership_in_log(0).await?; + let mems = StorageHelper::new(&mut store, &mut sm).last_membership_in_log(0).await?; assert_eq!(2, mems.len()); let mem = mems[0].clone(); assert_eq!(&Membership::new(vec![btreeset! 
{3,4,5}], None), mem.membership(),); @@ -220,8 +224,8 @@ where Ok(()) } - pub async fn get_membership_initial(mut store: S) -> Result<(), StorageError> { - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + pub async fn get_membership_initial(mut store: LS, mut sm: SM) -> Result<(), StorageError> { + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!(&EffectiveMembership::default(), mem_state.committed().as_ref()); assert_eq!(&EffectiveMembership::default(), mem_state.effective().as_ref()); @@ -229,14 +233,17 @@ where Ok(()) } - pub async fn get_membership_from_log_and_empty_sm(mut store: S) -> Result<(), StorageError> { + pub async fn get_membership_from_log_and_empty_sm( + mut store: LS, + mut sm: SM, + ) -> Result<(), StorageError> { tracing::info!("--- no log, read membership from state machine"); { // There is an empty membership config in an empty state machine. - store.append_to_log([membership_ent::(1, 1, btreeset! {1,2,3})]).await?; + append(&mut store, [membership_ent::(1, 1, btreeset! {1,2,3})]).await?; - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!(&EffectiveMembership::default(), mem_state.committed().as_ref()); assert_eq!( @@ -248,14 +255,16 @@ where Ok(()) } - pub async fn get_membership_from_log_and_sm(mut store: S) -> Result<(), StorageError> { + pub async fn get_membership_from_log_and_sm(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- no log, read membership from state machine"); { - store - .apply_to_state_machine(&[blank_ent::(1, 1), membership_ent::(1, 2, btreeset! {3,4,5})]) - .await?; + apply(&mut sm, [ + blank_ent::(1, 1), + membership_ent::(1, 2, btreeset! {3,4,5}), + ]) + .await?; - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {3,4,5}], None), @@ -269,9 +278,9 @@ where tracing::info!("--- membership presents in log, but smaller than last_applied, read from state machine"); { - store.append_to_log([membership_ent::(1, 1, btreeset! {1,2,3})]).await?; + append(&mut store, [membership_ent::(1, 1, btreeset! {1,2,3})]).await?; - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {3,4,5}], None), @@ -285,9 +294,13 @@ where tracing::info!("--- membership presents in log and > sm.last_applied, read from log"); { - store.append_to_log([blank_ent::(1, 2), membership_ent::(1, 3, btreeset! {7,8,9})]).await?; + append(&mut store, [ + blank_ent::(1, 2), + membership_ent::(1, 3, btreeset! {7,8,9}), + ]) + .await?; - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {3,4,5}], None), @@ -301,9 +314,13 @@ where tracing::info!("--- two membership present in log and > sm.last_applied, read 2 from log"); { - store.append_to_log([blank_ent::(1, 4), membership_ent::(1, 5, btreeset! {10,11})]).await?; + append(&mut store, [ + blank_ent::(1, 4), + membership_ent::(1, 5, btreeset! 
{10,11}), + ]) + .await?; - let mem_state = StorageHelper::new(&mut store).get_membership().await?; + let mem_state = StorageHelper::new(&mut store, &mut sm).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {7,8,9}], None), @@ -318,8 +335,8 @@ where Ok(()) } - pub async fn get_initial_state_without_init(mut store: S) -> Result<(), StorageError> { - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + pub async fn get_initial_state_without_init(mut store: LS, mut sm: SM) -> Result<(), StorageError> { + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; let mut want = RaftState::default(); want.vote.update(initial.vote.utime().unwrap(), Vote::default()); @@ -327,14 +344,19 @@ where Ok(()) } - pub async fn get_initial_state_with_state(mut store: S) -> Result<(), StorageError> { + pub async fn get_initial_state_with_state(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::default_vote(&mut store).await?; - store.append_to_log([blank_ent::(0, 0), blank_ent::(1, 1), blank_ent::(3, 2)]).await?; + append(&mut store, [ + blank_ent::(0, 0), + blank_ent::(1, 1), + blank_ent::(3, 2), + ]) + .await?; - store.apply_to_state_machine(&[blank_ent::(3, 1)]).await?; + apply(&mut sm, [blank_ent::(3, 1)]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( Some(&log_id(3, 2)), @@ -354,7 +376,10 @@ where Ok(()) } - pub async fn get_initial_state_membership_from_log_and_sm(mut store: S) -> Result<(), StorageError> { + pub async fn get_initial_state_membership_from_log_and_sm( + mut store: LS, + mut sm: SM, + ) -> Result<(), StorageError> { // It should never return membership from logs that are included in state machine present. Self::default_vote(&mut store).await?; @@ -363,11 +388,13 @@ where tracing::info!("--- no log, read membership from state machine"); { - store - .apply_to_state_machine(&[blank_ent::(1, 1), membership_ent::(1, 2, btreeset! {3,4,5})]) - .await?; + apply(&mut sm, [ + blank_ent::(1, 1), + membership_ent::(1, 2, btreeset! {3,4,5}), + ]) + .await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( &Membership::new(vec![btreeset! {3,4,5}], None), @@ -377,9 +404,9 @@ where tracing::info!("--- membership presents in log, but smaller than last_applied, read from state machine"); { - store.append_to_log([membership_ent::(1, 1, btreeset! {1,2,3})]).await?; + append(&mut store, [membership_ent::(1, 1, btreeset! {1,2,3})]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( &Membership::new(vec![btreeset! {3,4,5}], None), @@ -389,9 +416,9 @@ where tracing::info!("--- membership presents in log and > sm.last_applied, read from log"); { - store.append_to_log([membership_ent::(1, 3, btreeset! {1,2,3})]).await?; + append(&mut store, [membership_ent::(1, 3, btreeset! {1,2,3})]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( &Membership::new(vec![btreeset! 
{1,2,3}], None), @@ -402,14 +429,14 @@ where Ok(()) } - pub async fn get_initial_state_last_log_gt_sm(mut store: S) -> Result<(), StorageError> { + pub async fn get_initial_state_last_log_gt_sm(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::default_vote(&mut store).await?; - store.append_to_log([blank_ent::(0, 0), blank_ent::(2, 1)]).await?; + append(&mut store, [blank_ent::(0, 0), blank_ent::(2, 1)]).await?; - store.apply_to_state_machine(&[blank_ent::(1, 1), blank_ent::(1, 2)]).await?; + apply(&mut sm, [blank_ent::(1, 1), blank_ent::(1, 2)]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( Some(&log_id(2, 1)), @@ -419,14 +446,14 @@ where Ok(()) } - pub async fn get_initial_state_last_log_lt_sm(mut store: S) -> Result<(), StorageError> { + pub async fn get_initial_state_last_log_lt_sm(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::default_vote(&mut store).await?; - store.append_to_log([blank_ent::(1, 2)]).await?; + append(&mut store, [blank_ent::(1, 2)]).await?; - store.apply_to_state_machine(&[blank_ent::(3, 1)]).await?; + apply(&mut sm, [blank_ent::(3, 1)]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( Some(&log_id(3, 1)), @@ -441,7 +468,7 @@ where Ok(()) } - pub async fn get_initial_state_log_ids(mut store: S) -> Result<(), StorageError> { + pub async fn get_initial_state_log_ids(mut store: LS, mut sm: SM) -> Result<(), StorageError> { let log_id = |t, n: u64, i| LogId:: { leader_id: CommittedLeaderId::new(t, n.into()), index: i, @@ -449,23 +476,28 @@ where tracing::info!("--- empty store, expect []"); { - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!(Vec::>::new(), initial.log_ids.key_log_ids()); } tracing::info!("--- log terms: [0], last_purged_log_id is None, expect [(0,0)]"); { - store.append_to_log([blank_ent::(0, 0)]).await?; + append(&mut store, [blank_ent::(0, 0)]).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!(vec![log_id(0, 0, 0)], initial.log_ids.key_log_ids()); } tracing::info!("--- log terms: [0,1,1,2], last_purged_log_id is None, expect [(0,0),(1,1),(2,3)]"); { - store.append_to_log([blank_ent::(1, 1), blank_ent::(1, 2), blank_ent::(2, 3)]).await?; - - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + append(&mut store, [ + blank_ent::(1, 1), + blank_ent::(1, 2), + blank_ent::(2, 3), + ]) + .await?; + + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![log_id(0, 0, 0), log_id(1, 0, 1), log_id(2, 0, 3)], initial.log_ids.key_log_ids() @@ -476,9 +508,14 @@ where "--- log terms: [0,1,1,2,2,3,3], last_purged_log_id is None, expect [(0,0),(1,1),(2,3),(3,5),(3,6)]" ); { - store.append_to_log([blank_ent::(2, 4), blank_ent::(3, 5), blank_ent::(3, 6)]).await?; - - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + append(&mut store, [ + blank_ent::(2, 4), + blank_ent::(3, 5), + blank_ent::(3, 6), + ]) + .await?; + + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![ log_id(0, 0, 0), @@ -495,9 +532,9 
@@ where "--- log terms: [x,1,1,2,2,3,3], last_purged_log_id: (0,0), expect [(0,0),(1,1),(2,3),(3,5),(3,6)]" ); { - store.purge_logs_upto(log_id(0, 0, 0)).await?; + store.purge(log_id(0, 0, 0)).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![ log_id(0, 0, 0), @@ -512,9 +549,9 @@ where tracing::info!("--- log terms: [x,x,1,2,2,3,3], last_purged_log_id: (1,1), expect [(1,1),(2,3),(3,5),(3,6)]"); { - store.purge_logs_upto(log_id(1, 0, 1)).await?; + store.purge(log_id(1, 0, 1)).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![log_id(1, 0, 1), log_id(2, 0, 3), log_id(3, 0, 5), log_id(3, 0, 6)], initial.log_ids.key_log_ids() @@ -523,9 +560,9 @@ where tracing::info!("--- log terms: [x,x,x,2,2,3,3], last_purged_log_id: (1,2), expect [(1,2),(2,3),(3,5),(3,6)]"); { - store.purge_logs_upto(log_id(1, 0, 2)).await?; + store.purge(log_id(1, 0, 2)).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![log_id(1, 0, 2), log_id(2, 0, 3), log_id(3, 0, 5), log_id(3, 0, 6)], initial.log_ids.key_log_ids() @@ -534,9 +571,9 @@ where tracing::info!("--- log terms: [x,x,x,x,2,3,3], last_purged_log_id: (2,3), expect [(2,3),(3,5),(3,6)]"); { - store.purge_logs_upto(log_id(2, 0, 3)).await?; + store.purge(log_id(2, 0, 3)).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!( vec![log_id(2, 0, 3), log_id(3, 0, 5), log_id(3, 0, 6)], initial.log_ids.key_log_ids() @@ -545,16 +582,16 @@ where tracing::info!("--- log terms: [x,x,x,x,x,x,x], last_purged_log_id: (3,6), e.g., all purged expect [(3,6)]"); { - store.purge_logs_upto(log_id(3, 0, 6)).await?; + store.purge(log_id(3, 0, 6)).await?; - let initial = StorageHelper::new(&mut store).get_initial_state().await?; + let initial = StorageHelper::new(&mut store, &mut sm).get_initial_state().await?; assert_eq!(vec![log_id(3, 0, 6)], initial.log_ids.key_log_ids()); } Ok(()) } - pub async fn save_vote(mut store: S) -> Result<(), StorageError> { + pub async fn save_vote(mut store: LS, mut sm: SM) -> Result<(), StorageError> { store.save_vote(&Vote::new(100, NODE_ID.into())).await?; let got = store.read_vote().await?; @@ -563,18 +600,18 @@ where Ok(()) } - pub async fn get_log_entries(mut store: S) -> Result<(), StorageError> { + pub async fn get_log_entries(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::feed_10_logs_vote_self(&mut store).await?; tracing::info!("--- get start == stop"); { - let logs = StorageHelper::new(&mut store).get_log_entries(3..3).await?; + let logs = store.get_log_entries(3..3).await?; assert_eq!(logs.len(), 0, "expected no logs to be returned"); } tracing::info!("--- get start < stop"); { - let logs = StorageHelper::new(&mut store).get_log_entries(5..7).await?; + let logs = store.get_log_entries(5..7).await?; assert_eq!(logs.len(), 2); assert_eq!(*logs[0].get_log_id(), log_id(1, 5)); @@ -584,34 +621,31 @@ where Ok(()) } - pub async fn try_get_log_entry(mut store: S) -> Result<(), StorageError> { + pub async fn try_get_log_entry(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::feed_10_logs_vote_self(&mut 
store).await?; - store.purge_logs_upto(LogId::new(CommittedLeaderId::new(0, C::NodeId::default()), 0)).await?; - - let mut sh = StorageHelper::new(&mut store); + store.purge(LogId::new(CommittedLeaderId::new(0, C::NodeId::default()), 0)).await?; - let ent = sh.try_get_log_entry(3).await?; + let ent = store.try_get_log_entry(3).await?; assert_eq!(Some(log_id(1, 3)), ent.map(|x| *x.get_log_id())); - let ent = sh.try_get_log_entry(0).await?; + let ent = store.try_get_log_entry(0).await?; assert_eq!(None, ent.map(|x| *x.get_log_id())); - let ent = sh.try_get_log_entry(11).await?; + let ent = store.try_get_log_entry(11).await?; assert_eq!(None, ent.map(|x| *x.get_log_id())); Ok(()) } - pub async fn initial_logs(mut store: S) -> Result<(), StorageError> { - let mut sh = StorageHelper::new(&mut store); - let ent = sh.try_get_log_entry(0).await?; + pub async fn initial_logs(mut store: LS, mut sm: SM) -> Result<(), StorageError> { + let ent = store.try_get_log_entry(0).await?; assert!(ent.is_none(), "store initialized"); Ok(()) } - pub async fn get_log_state(mut store: S) -> Result<(), StorageError> { + pub async fn get_log_state(mut store: LS, mut sm: SM) -> Result<(), StorageError> { let st = store.get_log_state().await?; assert_eq!(None, st.last_purged_log_id); @@ -619,7 +653,12 @@ where tracing::info!("--- only logs"); { - store.append_to_log([blank_ent::(0, 0), blank_ent::(1, 1), blank_ent::(1, 2)]).await?; + append(&mut store, [ + blank_ent::(0, 0), + blank_ent::(1, 1), + blank_ent::(1, 2), + ]) + .await?; let st = store.get_log_state().await?; assert_eq!(None, st.last_purged_log_id); @@ -628,7 +667,7 @@ where tracing::info!("--- delete log 0-0"); { - store.purge_logs_upto(log_id(0, 0)).await?; + store.purge(log_id(0, 0)).await?; let st = store.get_log_state().await?; assert_eq!( @@ -640,7 +679,7 @@ where tracing::info!("--- delete all log"); { - store.purge_logs_upto(log_id(1, 2)).await?; + store.purge(log_id(1, 2)).await?; let st = store.get_log_state().await?; assert_eq!(Some(log_id(1, 2)), st.last_purged_log_id); @@ -649,7 +688,7 @@ where tracing::info!("--- delete advance last present logs"); { - store.purge_logs_upto(log_id(2, 3)).await?; + store.purge(log_id(2, 3)).await?; let st = store.get_log_state().await?; assert_eq!(Some(log_id(2, 3)), st.last_purged_log_id); @@ -659,33 +698,38 @@ where Ok(()) } - pub async fn get_log_id(mut store: S) -> Result<(), StorageError> { + pub async fn get_log_id(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::feed_10_logs_vote_self(&mut store).await?; - store.purge_logs_upto(log_id(1, 3)).await?; + store.purge(log_id(1, 3)).await?; - let res = StorageHelper::new(&mut store).get_log_id(0).await; + let res = store.get_log_id(0).await; assert!(res.is_err()); - let res = StorageHelper::new(&mut store).get_log_id(11).await; + let res = store.get_log_id(11).await; assert!(res.is_err()); - let res = StorageHelper::new(&mut store).get_log_id(3).await?; + let res = store.get_log_id(3).await?; assert_eq!(log_id(1, 3), res); - let res = StorageHelper::new(&mut store).get_log_id(4).await?; + let res = store.get_log_id(4).await?; assert_eq!(log_id(1, 4), res); Ok(()) } - pub async fn last_id_in_log(mut store: S) -> Result<(), StorageError> { + pub async fn last_id_in_log(mut store: LS, mut sm: SM) -> Result<(), StorageError> { let last_log_id = store.get_log_state().await?.last_log_id; assert_eq!(None, last_log_id); tracing::info!("--- only logs"); { - store.append_to_log([blank_ent::(0, 0), blank_ent::(1, 1), blank_ent::(1, 2)]).await?; + 
append(&mut store, [ + blank_ent::(0, 0), + blank_ent::(1, 1), + blank_ent::(1, 2), + ]) + .await?; let last_log_id = store.get_log_state().await?.last_log_id; assert_eq!(Some(log_id(1, 2)), last_log_id); @@ -693,14 +737,14 @@ where tracing::info!("--- last id in logs < last applied id in sm, only return the id in logs"); { - store.apply_to_state_machine(&[blank_ent::(1, 3)]).await?; + apply(&mut sm, [blank_ent::(1, 3)]).await?; let last_log_id = store.get_log_state().await?.last_log_id; assert_eq!(Some(log_id(1, 2)), last_log_id); } tracing::info!("--- no logs, return default"); { - store.purge_logs_upto(log_id(1, 2)).await?; + store.purge(log_id(1, 2)).await?; let last_log_id = store.get_log_state().await?.last_log_id; assert_eq!(Some(log_id(1, 2)), last_log_id); @@ -709,16 +753,16 @@ where Ok(()) } - pub async fn last_applied_state(mut store: S) -> Result<(), StorageError> { - let (applied, mem) = store.last_applied_state().await?; + pub async fn last_applied_state(mut store: LS, mut sm: SM) -> Result<(), StorageError> { + let (applied, mem) = sm.applied_state().await?; assert_eq!(None, applied); assert_eq!(StoredMembership::default(), mem); tracing::info!("--- with last_applied and last_membership"); { - store.apply_to_state_machine(&[membership_ent::(1, 3, btreeset! {1,2})]).await?; + apply(&mut sm, [membership_ent::(1, 3, btreeset! {1,2})]).await?; - let (applied, mem) = store.last_applied_state().await?; + let (applied, mem) = sm.applied_state().await?; assert_eq!(Some(log_id(1, 3)), applied); assert_eq!( StoredMembership::new(Some(log_id(1, 3)), Membership::new(vec![btreeset! {1,2}], None)), @@ -728,9 +772,9 @@ where tracing::info!("--- no logs, return default"); { - store.apply_to_state_machine(&[blank_ent::(1, 5)]).await?; + apply(&mut sm, [blank_ent::(1, 5)]).await?; - let (applied, mem) = store.last_applied_state().await?; + let (applied, mem) = sm.applied_state().await?; assert_eq!(Some(log_id(1, 5)), applied); assert_eq!( StoredMembership::new(Some(log_id(1, 3)), Membership::new(vec![btreeset! 
{1,2}], None)), @@ -741,12 +785,12 @@ where Ok(()) } - pub async fn purge_logs_upto_0(mut store: S) -> Result<(), StorageError> { + pub async fn purge_logs_upto_0(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- delete (-oo, 0]"); Self::feed_10_logs_vote_self(&mut store).await?; - store.purge_logs_upto(log_id(0, 0)).await?; + store.purge(log_id(0, 0)).await?; let logs = store.try_get_log_entries(0..100).await?; assert_eq!(logs.len(), 10); @@ -762,12 +806,12 @@ where Ok(()) } - pub async fn purge_logs_upto_5(mut store: S) -> Result<(), StorageError> { + pub async fn purge_logs_upto_5(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- delete (-oo, 5]"); Self::feed_10_logs_vote_self(&mut store).await?; - store.purge_logs_upto(log_id(1, 5)).await?; + store.purge(log_id(1, 5)).await?; let logs = store.try_get_log_entries(0..100).await?; assert_eq!(logs.len(), 5); @@ -783,12 +827,12 @@ where Ok(()) } - pub async fn purge_logs_upto_20(mut store: S) -> Result<(), StorageError> { + pub async fn purge_logs_upto_20(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- delete (-oo, 20]"); Self::feed_10_logs_vote_self(&mut store).await?; - store.purge_logs_upto(log_id(1, 20)).await?; + store.purge(log_id(1, 20)).await?; let logs = store.try_get_log_entries(0..100).await?; assert_eq!(logs.len(), 0); @@ -803,12 +847,12 @@ where Ok(()) } - pub async fn delete_logs_since_11(mut store: S) -> Result<(), StorageError> { + pub async fn delete_logs_since_11(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- delete [11, +oo)"); Self::feed_10_logs_vote_self(&mut store).await?; - store.delete_conflict_logs_since(log_id(1, 11)).await?; + store.truncate(log_id(1, 11)).await?; let logs = store.try_get_log_entries(0..100).await?; assert_eq!(logs.len(), 11); @@ -823,12 +867,12 @@ where Ok(()) } - pub async fn delete_logs_since_0(mut store: S) -> Result<(), StorageError> { + pub async fn delete_logs_since_0(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- delete [0, +oo)"); Self::feed_10_logs_vote_self(&mut store).await?; - store.delete_conflict_logs_since(log_id(0, 0)).await?; + store.truncate(log_id(0, 0)).await?; let logs = store.try_get_log_entries(0..100).await?; assert_eq!(logs.len(), 0); @@ -844,12 +888,12 @@ where Ok(()) } - pub async fn append_to_log(mut store: S) -> Result<(), StorageError> { + pub async fn append_to_log(mut store: LS, mut sm: SM) -> Result<(), StorageError> { Self::feed_10_logs_vote_self(&mut store).await?; - store.purge_logs_upto(log_id(0, 0)).await?; + store.purge(log_id(0, 0)).await?; - store.append_to_log([blank_ent::(2, 10)]).await?; + append(&mut store, [blank_ent::(2, 10)]).await?; let l = store.try_get_log_entries(0..).await?.len(); let last = store.try_get_log_entries(0..).await?.into_iter().last().unwrap(); @@ -859,12 +903,12 @@ where Ok(()) } - pub async fn snapshot_meta(mut store: S) -> Result<(), StorageError> { + pub async fn snapshot_meta(mut store: LS, mut sm: SM) -> Result<(), StorageError> { tracing::info!("--- just initialized"); { - store.apply_to_state_machine(&[membership_ent::(0, 0, btreeset! {1,2})]).await?; + apply(&mut sm, [membership_ent::(0, 0, btreeset! 
{1,2})]).await?; - let mut b = store.get_snapshot_builder().await; + let mut b = sm.get_snapshot_builder().await; let snap = b.build_snapshot().await?; let meta = snap.meta; assert_eq!(Some(log_id(0, 0)), meta.last_log_id); @@ -877,11 +921,13 @@ where tracing::info!("--- one app log, one membership log"); { - store - .apply_to_state_machine(&[blank_ent::(1, 1), membership_ent::(2, 2, btreeset! {3,4})]) - .await?; + apply(&mut sm, [ + blank_ent::(1, 1), + membership_ent::(2, 2, btreeset! {3,4}), + ]) + .await?; - let mut b = store.get_snapshot_builder().await; + let mut b = sm.get_snapshot_builder().await; let snap = b.build_snapshot().await?; let meta = snap.meta; assert_eq!(Some(log_id(2, 2)), meta.last_log_id); @@ -895,7 +941,7 @@ where Ok(()) } - // pub async fn apply_single(mut store: S) -> Result<(), StorageError> { + // pub async fn apply_single(mut store: S, mut sm: SM) -> Result<(), StorageError> { // // let entry = Entry { @@ -908,7 +954,7 @@ where // }), // }; // - // store.apply_to_state_machine(&[&entry]).await?; + // apply(&mut sm, &[&entry]).await?; // let (last_applied, _) = store.last_applied_state().await?; // // assert_eq!( @@ -933,7 +979,7 @@ where // Ok(()) // } // - // pub async fn apply_multi(mut store: S) -> Result<(), StorageError> { + // pub async fn apply_multi(mut store: S, mut sm: SM) -> Result<(), StorageError> { // // let req0 = ClientRequest { @@ -964,7 +1010,7 @@ where // }) // .collect::>(); // - // store.apply_to_state_machine(&entries.iter().collect::>()).await?; + // apply(&mut sm, &entries.iter().collect::>()).await?; // // let (last_applied, _) = store.last_applied_state().await?; // @@ -1011,11 +1057,11 @@ where // Ok(()) // } - pub async fn feed_10_logs_vote_self(sto: &mut S) -> Result<(), StorageError> { - sto.append_to_log([blank_ent::(0, 0)]).await?; + pub async fn feed_10_logs_vote_self(sto: &mut LS) -> Result<(), StorageError> { + append(sto, [blank_ent::(0, 0)]).await?; for i in 1..=10 { - sto.append_to_log([blank_ent::(1, i)]).await?; + append(sto, [blank_ent::(1, i)]).await?; } Self::default_vote(sto).await?; @@ -1023,7 +1069,7 @@ where Ok(()) } - pub async fn default_vote(sto: &mut S) -> Result<(), StorageError> { + pub async fn default_vote(sto: &mut LS) -> Result<(), StorageError> { sto.save_vote(&Vote::new(1, NODE_ID.into())).await?; Ok(()) @@ -1063,14 +1109,48 @@ where } /// Build a `RaftStorage` implementation and run a test on it. 
-async fn run_test(builder: &B, test_fn: TestFn) -> Result> +async fn run_test( + builder: &B, + test_fn: TestFn, +) -> Result> where C: RaftTypeConfig, - S: RaftStorage, - B: StoreBuilder, + LS: RaftLogStorage, + SM: RaftStateMachine, + B: StoreBuilder, Fu: Future>> + Send, - TestFn: Fn(S) -> Fu + Sync + Send, + TestFn: Fn(LS, SM) -> Fu + Sync + Send, +{ + let (_g, store, sm) = builder.build().await?; + test_fn(store, sm).await +} + +/// A wrapper for calling nonblocking `RaftStorage::apply_to_state_machine()` +async fn apply(sm: &mut SM, entries: I) -> Result<(), StorageError> +where + C: RaftTypeConfig, + SM: RaftStateMachine, + I: IntoIterator + Send, +{ + sm.apply(entries).await?; + Ok(()) +} + +/// A wrapper for calling nonblocking `RaftStorage::append_to_log()` +async fn append(store: &mut LS, entries: I) -> Result<(), StorageError> +where + C: RaftTypeConfig, + LS: RaftLogStorage, + I: IntoIterator, { - let (_g, store) = builder.build().await?; - test_fn(store).await + let entries = entries.into_iter().collect::>(); + let last_log_id = *entries.last().unwrap().get_log_id(); + + let (tx, rx) = oneshot::channel(); + + let cb = LogFlushed::new(Some(last_log_id), tx); + + store.append(entries, cb).await?; + rx.await.unwrap().map_err(|e| StorageIOError::write_logs(AnyError::error(e)))?; + Ok(()) } diff --git a/rocksstore-compat07/src/test.rs b/rocksstore-compat07/src/test.rs index 52a0ff5ab..e454bbecf 100644 --- a/rocksstore-compat07/src/test.rs +++ b/rocksstore-compat07/src/test.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; +use openraft::storage::Adaptor; use openraft::testing::StoreBuilder; use openraft::testing::Suite; use openraft::StorageError; @@ -10,13 +11,17 @@ use crate::Config; use crate::RocksNodeId; use crate::RocksStore; +type LogStore = Adaptor>; +type StateMachine = Adaptor>; + struct RocksBuilder {} #[async_trait] -impl StoreBuilder, TempDir> for RocksBuilder { - async fn build(&self) -> Result<(TempDir, Arc), StorageError> { +impl StoreBuilder for RocksBuilder { + async fn build(&self) -> Result<(TempDir, LogStore, StateMachine), StorageError> { let td = tempfile::TempDir::new().expect("couldn't create temp dir"); let store = RocksStore::new(td.path()).await; - Ok((td, store)) + let (log_store, sm) = Adaptor::new(store); + Ok((td, log_store, sm)) } } diff --git a/rocksstore/src/test.rs b/rocksstore/src/test.rs index 0bce5704a..6b959d556 100644 --- a/rocksstore/src/test.rs +++ b/rocksstore/src/test.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; +use openraft::storage::Adaptor; use openraft::testing::StoreBuilder; use openraft::testing::Suite; use openraft::StorageError; @@ -10,13 +11,17 @@ use crate::Config; use crate::RocksNodeId; use crate::RocksStore; +type LogStore = Adaptor>; +type StateMachine = Adaptor>; + struct RocksBuilder {} #[async_trait] -impl StoreBuilder, TempDir> for RocksBuilder { - async fn build(&self) -> Result<(TempDir, Arc), StorageError> { - let td = tempfile::TempDir::new().expect("couldn't create temp dir"); +impl StoreBuilder for RocksBuilder { + async fn build(&self) -> Result<(TempDir, LogStore, StateMachine), StorageError> { + let td = TempDir::new().expect("couldn't create temp dir"); let store = RocksStore::new(td.path()).await; - Ok((td, store)) + let (log_store, sm) = Adaptor::new(store); + Ok((td, log_store, sm)) } } /// To customize a builder: diff --git a/sledstore/src/test.rs b/sledstore/src/test.rs index 96fff6ae4..fb29630d1 100644 --- a/sledstore/src/test.rs +++ b/sledstore/src/test.rs 
@@ -1,6 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; +use openraft::storage::Adaptor; use openraft::testing::StoreBuilder; use openraft::testing::Suite; use openraft::StorageError; @@ -17,15 +18,19 @@ pub fn test_sled_store() -> Result<(), StorageError> { Suite::test_all(SledBuilder {}) } +type LogStore = Adaptor>; +type StateMachine = Adaptor>; + #[async_trait] -impl StoreBuilder, TempDir> for SledBuilder { - async fn build(&self) -> Result<(TempDir, Arc), StorageError> { - let td = tempfile::TempDir::new().expect("couldn't create temp dir"); +impl StoreBuilder for SledBuilder { + async fn build(&self) -> Result<(TempDir, LogStore, StateMachine), StorageError> { + let td = TempDir::new().expect("couldn't create temp dir"); let db: sled::Db = sled::open(td.path()).unwrap(); let store = SledStore::new(Arc::new(db)).await; + let (log_store, sm) = Adaptor::new(store); - Ok((td, store)) + Ok((td, log_store, sm)) } } diff --git a/tests/tests/append_entries/t20_append_conflicts.rs b/tests/tests/append_entries/t20_append_conflicts.rs index db80d2e51..cc0e819f8 100644 --- a/tests/tests/append_entries/t20_append_conflicts.rs +++ b/tests/tests/append_entries/t20_append_conflicts.rs @@ -4,14 +4,14 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; use openraft::raft::AppendEntriesRequest; +use openraft::storage::RaftLogReaderExt; +use openraft::storage::RaftLogStorage; use openraft::CommittedLeaderId; use openraft::Config; use openraft::Entry; use openraft::LogId; -use openraft::RaftStorage; use openraft::RaftTypeConfig; use openraft::ServerState; -use openraft::StorageHelper; use openraft::Vote; use crate::fixtures::blank; @@ -39,7 +39,7 @@ async fn append_conflicts() -> Result<()> { router.wait_for_log(&btreeset![0], None, timeout(), "empty").await?; router.wait_for_state(&btreeset![0], ServerState::Learner, timeout(), "empty").await?; - let (r0, mut sto0) = router.remove_node(0).unwrap(); + let (r0, mut sto0, _sm0) = router.remove_node(0).unwrap(); check_logs(&mut sto0, vec![]).await?; tracing::info!("--- case 0: prev_log_id == None, no logs"); @@ -217,12 +217,12 @@ async fn append_conflicts() -> Result<()> { } /// To check if logs is as expected. 
-async fn check_logs(sto: &mut Sto, terms: Vec) -> Result<()> +async fn check_logs(log_store: &mut LS, terms: Vec) -> Result<()> where C: RaftTypeConfig, - Sto: RaftStorage, + LS: RaftLogStorage, { - let logs = StorageHelper::new(sto).get_log_entries(..).await?; + let logs = log_store.get_log_entries(..).await?; let skip = 0; let want: Vec> = terms .iter() diff --git a/tests/tests/append_entries/t30_append_inconsistent_log.rs b/tests/tests/append_entries/t30_append_inconsistent_log.rs index 3e587e666..cffb311b6 100644 --- a/tests/tests/append_entries/t30_append_inconsistent_log.rs +++ b/tests/tests/append_entries/t30_append_inconsistent_log.rs @@ -3,10 +3,11 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; +use openraft::storage::RaftLogReaderExt; +use openraft::storage::RaftLogStorage; +use openraft::testing; use openraft::Config; -use openraft::RaftStorage; use openraft::ServerState; -use openraft::StorageHelper; use openraft::Vote; use crate::fixtures::blank; @@ -47,18 +48,18 @@ async fn append_inconsistent_log() -> Result<()> { tracing::info!("--- remove all nodes and fake the logs"); - let (r0, mut sto0) = router.remove_node(0).unwrap(); - let (r1, sto1) = router.remove_node(1).unwrap(); - let (r2, mut sto2) = router.remove_node(2).unwrap(); + let (r0, mut sto0, sm0) = router.remove_node(0).unwrap(); + let (r1, sto1, sm1) = router.remove_node(1).unwrap(); + let (r2, mut sto2, sm2) = router.remove_node(2).unwrap(); r0.shutdown().await?; r1.shutdown().await?; r2.shutdown().await?; for i in log_index + 1..=100 { - sto0.append_to_log([blank(2, i)]).await?; + testing::blocking_append(&mut sto0, [blank(2, i)]).await?; - sto2.append_to_log([blank(3, i)]).await?; + testing::blocking_append(&mut sto2, [blank(3, i)]).await?; } sto0.save_vote(&Vote::new(2, 0)).await?; @@ -68,14 +69,14 @@ async fn append_inconsistent_log() -> Result<()> { tracing::info!("--- restart node 1 and isolate. To let node-2 to become leader, node-1 should not vote for node-0"); { - router.new_raft_node_with_sto(1, sto1.clone()).await; + router.new_raft_node_with_sto(1, sto1.clone(), sm1.clone()).await; router.isolate_node(1); } tracing::info!("--- restart node 0 and 2"); { - router.new_raft_node_with_sto(0, sto0.clone()).await; - router.new_raft_node_with_sto(2, sto2.clone()).await; + router.new_raft_node_with_sto(0, sto0.clone(), sm0.clone()).await; + router.new_raft_node_with_sto(2, sto2.clone(), sm2.clone()).await; } // leader appends at least one blank log. 
There may be more than one transient leaders @@ -108,7 +109,7 @@ async fn append_inconsistent_log() -> Result<()> { .log_at_least(Some(log_index), "sync log to node 0") .await?; - let logs = StorageHelper::new(&mut sto0).get_log_entries(60..=60).await?; + let logs = sto0.get_log_entries(60..=60).await?; assert_eq!( 3, logs.first().unwrap().log_id.leader_id.term, diff --git a/tests/tests/append_entries/t40_append_updates_membership.rs b/tests/tests/append_entries/t40_append_updates_membership.rs index 7198fed13..fcad556ce 100644 --- a/tests/tests/append_entries/t40_append_updates_membership.rs +++ b/tests/tests/append_entries/t40_append_updates_membership.rs @@ -40,7 +40,7 @@ async fn append_updates_membership() -> Result<()> { router.wait_for_log(&btreeset![0], None, None, "empty").await?; router.wait_for_state(&btreeset![0], ServerState::Learner, None, "empty").await?; - let (r0, _sto0) = router.remove_node(0).unwrap(); + let (r0, _sto0, _sm0) = router.remove_node(0).unwrap(); tracing::info!("--- append-entries update membership"); { diff --git a/tests/tests/append_entries/t50_append_entries_with_bigger_term.rs b/tests/tests/append_entries/t50_append_entries_with_bigger_term.rs index c2be495b5..ff9508392 100644 --- a/tests/tests/append_entries/t50_append_entries_with_bigger_term.rs +++ b/tests/tests/append_entries/t50_append_entries_with_bigger_term.rs @@ -55,11 +55,12 @@ async fn append_entries_with_bigger_term() -> Result<()> { assert!(resp.is_success()); // after append entries, check hard state in term 2 and vote for node 1 - let mut store = router.get_storage_handle(&0)?; + let (mut store, mut sm) = router.get_storage_handle(&0)?; router .assert_storage_state_with_sto( &mut store, + &mut sm, &0, 2, log_index, diff --git a/tests/tests/elect/t10_elect_compare_last_log.rs b/tests/tests/elect/t10_elect_compare_last_log.rs index 41d87ebee..7d4f6e3cb 100644 --- a/tests/tests/elect/t10_elect_compare_last_log.rs +++ b/tests/tests/elect/t10_elect_compare_last_log.rs @@ -5,10 +5,11 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; use openraft::entry::RaftEntry; +use openraft::storage::RaftLogStorage; +use openraft::testing; use openraft::Config; use openraft::Entry; use openraft::Membership; -use openraft::RaftStorage; use openraft::ServerState; use openraft::Vote; @@ -33,14 +34,14 @@ async fn elect_compare_last_log() -> Result<()> { let mut router = RaftRouter::new(config.clone()); - let mut sto0 = router.new_store(); - let mut sto1 = router.new_store(); + let (mut sto0, sm0) = router.new_store(); + let (mut sto1, sm1) = router.new_store(); tracing::info!("--- fake store: sto0: last log: 2,1"); { sto0.save_vote(&Vote::new(10, 0)).await?; - sto0.append_to_log([ + testing::blocking_append(&mut sto0, [ // blank(0, 0), Entry::new_membership(log_id(2, 0, 1), Membership::new(vec![btreeset! {0,1}], None)), @@ -52,7 +53,7 @@ async fn elect_compare_last_log() -> Result<()> { { sto1.save_vote(&Vote::new(10, 0)).await?; - sto1.append_to_log([ + testing::blocking_append(&mut sto1, [ blank(0, 0), Entry::new_membership(log_id(1, 0, 1), Membership::new(vec![btreeset! 
{0,1}], None)), blank(1, 2), @@ -62,8 +63,8 @@ async fn elect_compare_last_log() -> Result<()> { tracing::info!("--- bring up cluster and elect"); - router.new_raft_node_with_sto(0, sto0.clone()).await; - router.new_raft_node_with_sto(1, sto1.clone()).await; + router.new_raft_node_with_sto(0, sto0.clone(), sm0.clone()).await; + router.new_raft_node_with_sto(1, sto1.clone(), sm1.clone()).await; router.wait(&0, timeout()).state(ServerState::Leader, "only node 0 becomes leader").await?; diff --git a/tests/tests/fixtures/mod.rs b/tests/tests/fixtures/mod.rs index bb8e21ef1..a8b78f173 100644 --- a/tests/tests/fixtures/mod.rs +++ b/tests/tests/fixtures/mod.rs @@ -7,8 +7,6 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashSet; use std::env; -use std::fmt::Debug; -use std::marker::PhantomData; use std::panic::PanicInfo; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; @@ -38,7 +36,9 @@ use openraft::raft::InstallSnapshotRequest; use openraft::raft::InstallSnapshotResponse; use openraft::raft::VoteRequest; use openraft::raft::VoteResponse; -use openraft::storage::RaftStorage; +use openraft::storage::Adaptor; +use openraft::storage::RaftLogStorage; +use openraft::storage::RaftStateMachine; use openraft::CommittedLeaderId; use openraft::Config; use openraft::Entry; @@ -48,12 +48,15 @@ use openraft::LogIdOptionExt; use openraft::Membership; use openraft::MessageSummary; use openraft::Raft; +use openraft::RaftLogReader; use openraft::RaftMetrics; use openraft::RaftNetwork; use openraft::RaftNetworkFactory; use openraft::RaftState; use openraft::RaftTypeConfig; use openraft::ServerState; +use openraft_memstore::ClientRequest; +use openraft_memstore::ClientResponse; use openraft_memstore::Config as MemConfig; use openraft_memstore::IntoMemClientRequest; use openraft_memstore::MemNodeId; @@ -66,8 +69,11 @@ use crate::fixtures::logging::init_file_logging; pub mod logging; +pub type MemLogStore = Adaptor>; +pub type MemStateMachine = Adaptor>; + /// A concrete Raft type used during testing. -pub type MemRaft> = Raft, S>; +pub type MemRaft = Raft; pub fn init_default_ut_tracing() { static START: Once = Once::new(); @@ -130,20 +136,16 @@ pub fn log_panic(panic: &PanicInfo) { } /// A type which emulates a network transport and implements the `RaftNetworkFactory` trait. -pub struct TypedRaftRouter = Arc> -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ +pub struct TypedRaftRouter { /// The Raft runtime config which all nodes are using. config: Arc, + /// The table of all nodes currently known to this router instance. #[allow(clippy::type_complexity)] - routing_table: Arc, S)>>>, + routing_table: Arc>>, /// Nodes which are isolated can neither send nor receive frames. - isolated_nodes: Arc>>, + isolated_nodes: Arc>>, /// To emulate network delay for sending, in milliseconds. /// 0 means no delay. @@ -151,26 +153,20 @@ where } /// Default `RaftRouter` for memstore. 
-pub type RaftRouter = TypedRaftRouter>; +pub type RaftRouter = TypedRaftRouter; -pub struct Builder> { +pub struct Builder { config: Arc, send_delay: u64, - _phantom: PhantomData<(C, S)>, } -impl> Builder -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ +impl Builder { pub fn send_delay(mut self, ms: u64) -> Self { self.send_delay = ms; self } - pub fn build(self) -> TypedRaftRouter { + pub fn build(self) -> TypedRaftRouter { let send_delay = { let send_delay = env::var("OPENRAFT_NETWORK_SEND_DELAY").ok(); @@ -190,12 +186,7 @@ where } } -impl> Clone for TypedRaftRouter -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ +impl Clone for TypedRaftRouter { fn clone(&self) -> Self { Self { config: self.config.clone(), @@ -206,18 +197,9 @@ where } } -impl> TypedRaftRouter -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ - pub fn builder(config: Arc) -> Builder { - Builder { - config, - send_delay: 0, - _phantom: PhantomData, - } +impl TypedRaftRouter { + pub fn builder(config: Arc) -> Builder { + Builder { config, send_delay: 0 } } /// Create a new instance. @@ -247,10 +229,10 @@ where #[tracing::instrument(level = "debug", skip(self))] pub async fn new_cluster( &mut self, - voter_ids: BTreeSet, - learners: BTreeSet, + voter_ids: BTreeSet, + learners: BTreeSet, ) -> anyhow::Result { - let leader_id = C::NodeId::default(); + let leader_id = MemNodeId::default(); assert!(voter_ids.contains(&leader_id)); self.new_raft_node(leader_id).await; @@ -291,7 +273,7 @@ where if voter_ids.len() > 1 { tracing::info!("--- change membership to setup voters: {:?}", voter_ids); - let node = self.get_raft_handle(&C::NodeId::default())?; + let node = self.get_raft_handle(&MemNodeId::default())?; node.change_membership(voter_ids.clone(), false).await?; log_index += 2; @@ -307,7 +289,7 @@ where for id in learners.clone() { tracing::info!("--- add learner: {}", id); self.new_raft_node(id).await; - self.add_learner(C::NodeId::default(), id).await?; + self.add_learner(MemNodeId::default(), id).await?; log_index += 1; } self.wait_for_log( @@ -322,24 +304,26 @@ where } /// Create and register a new Raft node bearing the given ID. - pub async fn new_raft_node(&mut self, id: C::NodeId) { - let memstore = self.new_store(); - self.new_raft_node_with_sto(id, memstore).await + pub async fn new_raft_node(&mut self, id: MemNodeId) { + let (log_store, sm) = self.new_store(); + self.new_raft_node_with_sto(id, log_store, sm).await } - pub fn new_store(&mut self) -> S { - S::default() + pub fn new_store(&mut self) -> (MemLogStore, MemStateMachine) { + let store = Arc::new(MemStore::default()); + Adaptor::new(store) } - #[tracing::instrument(level = "debug", skip(self, sto))] - pub async fn new_raft_node_with_sto(&mut self, id: C::NodeId, sto: S) { - let node = Raft::new(id, self.config.clone(), self.clone(), sto.clone()).await.unwrap(); + #[tracing::instrument(level = "debug", skip_all)] + + pub async fn new_raft_node_with_sto(&mut self, id: MemNodeId, log_store: MemLogStore, sm: MemStateMachine) { + let node = Raft::new(id, self.config.clone(), self.clone(), log_store.clone(), sm.clone()).await.unwrap(); let mut rt = self.routing_table.lock().unwrap(); - rt.insert(id, (node, sto)); + rt.insert(id, (node, log_store, sm)); } /// Remove the target node from the routing table & isolation. 
- pub fn remove_node(&mut self, id: C::NodeId) -> Option<(MemRaft, S)> { + pub fn remove_node(&mut self, id: MemNodeId) -> Option<(MemRaft, MemLogStore, MemStateMachine)> { let opt_handles = { let mut rt = self.routing_table.lock().unwrap(); rt.remove(&id) @@ -354,9 +338,9 @@ where } /// Initialize all nodes based on the config in the routing table. - pub async fn initialize_from_single_node(&self, node_id: C::NodeId) -> anyhow::Result<()> { + pub async fn initialize_from_single_node(&self, node_id: MemNodeId) -> anyhow::Result<()> { tracing::info!({ node_id = display(node_id) }, "initializing cluster from single node"); - let members: BTreeSet = { + let members: BTreeSet = { let rt = self.routing_table.lock().unwrap(); rt.keys().cloned().collect() }; @@ -368,13 +352,13 @@ where /// Isolate the network of the specified node. #[tracing::instrument(level = "debug", skip(self))] - pub fn isolate_node(&self, id: C::NodeId) { + pub fn isolate_node(&self, id: MemNodeId) { self.isolated_nodes.lock().unwrap().insert(id); } /// Get a payload of the latest metrics from each node in the cluster. #[allow(clippy::significant_drop_in_scrutinee)] - pub fn latest_metrics(&self) -> Vec> { + pub fn latest_metrics(&self) -> Vec> { let rt = self.routing_table.lock().unwrap(); let mut metrics = vec![]; for node in rt.values() { @@ -385,14 +369,14 @@ where metrics } - pub fn get_metrics(&self, node_id: &C::NodeId) -> anyhow::Result> { + pub fn get_metrics(&self, node_id: &MemNodeId) -> anyhow::Result> { let node = self.get_raft_handle(node_id)?; let metrics = node.metrics().borrow().clone(); Ok(metrics) } #[tracing::instrument(level = "debug", skip(self))] - pub fn get_raft_handle(&self, node_id: &C::NodeId) -> Result, NetworkError> { + pub fn get_raft_handle(&self, node_id: &MemNodeId) -> Result { let rt = self.routing_table.lock().unwrap(); let raft_and_sto = rt .get(node_id) @@ -401,31 +385,31 @@ where Ok(r) } - pub fn get_storage_handle(&self, node_id: &C::NodeId) -> anyhow::Result { + pub fn get_storage_handle(&self, node_id: &MemNodeId) -> anyhow::Result<(MemLogStore, MemStateMachine)> { let rt = self.routing_table.lock().unwrap(); let addr = rt.get(node_id).with_context(|| format!("could not find node {} in routing table", node_id))?; - let sto = addr.clone().1; - Ok(sto) + let x = addr.clone(); + Ok((x.1, x.2)) } /// Wait for metrics until it satisfies some condition. 
#[tracing::instrument(level = "info", skip(self, func))] pub async fn wait_for_metrics( &self, - node_id: &C::NodeId, + node_id: &MemNodeId, func: T, timeout: Option, msg: &str, - ) -> anyhow::Result> + ) -> anyhow::Result> where - T: Fn(&RaftMetrics) -> bool + Send, + T: Fn(&RaftMetrics) -> bool + Send, { let wait = self.wait(node_id, timeout); let rst = wait.metrics(func, format!("node-{} {}", node_id, msg)).await?; Ok(rst) } - pub fn wait(&self, node_id: &C::NodeId, timeout: Option) -> Wait { + pub fn wait(&self, node_id: &MemNodeId, timeout: Option) -> Wait { let node = { let rt = self.routing_table.lock().unwrap(); rt.get(node_id).expect("target node not found in routing table").clone().0 @@ -438,7 +422,7 @@ where #[tracing::instrument(level = "info", skip(self))] pub async fn wait_for_log( &self, - node_ids: &BTreeSet, + node_ids: &BTreeSet, want_log: Option, timeout: Option, msg: &str, @@ -452,15 +436,15 @@ where #[tracing::instrument(level = "info", skip(self))] pub async fn wait_for_members( &self, - node_ids: &BTreeSet, - members: BTreeSet, + node_ids: &BTreeSet, + members: BTreeSet, timeout: Option, msg: &str, ) -> anyhow::Result<()> { for i in node_ids.iter() { let wait = self.wait(i, timeout); wait.metrics( - |x| x.membership_config.voter_ids().collect::>() == members, + |x| x.membership_config.voter_ids().collect::>() == members, msg, ) .await?; @@ -472,7 +456,7 @@ where #[tracing::instrument(level = "info", skip(self))] pub async fn wait_for_state( &self, - node_ids: &BTreeSet, + node_ids: &BTreeSet, want_state: ServerState, timeout: Option, msg: &str, @@ -487,8 +471,8 @@ where #[tracing::instrument(level = "info", skip(self))] pub async fn wait_for_snapshot( &self, - node_ids: &BTreeSet, - want: LogId, + node_ids: &BTreeSet, + want: LogId, timeout: Option, msg: &str, ) -> anyhow::Result<()> { @@ -499,7 +483,7 @@ where } /// Get the ID of the current leader. - pub fn leader(&self) -> Option { + pub fn leader(&self) -> Option { let isolated = { let isolated = self.isolated_nodes.lock().unwrap(); isolated.clone() @@ -522,7 +506,7 @@ where /// Restore the network of the specified node. #[tracing::instrument(level = "debug", skip(self))] - pub fn restore_node(&self, id: C::NodeId) { + pub fn restore_node(&self, id: MemNodeId) { let mut nodes = self.isolated_nodes.lock().unwrap(); nodes.remove(&id); } @@ -530,15 +514,15 @@ where /// Bring up a new learner and add it to the leader's membership. pub async fn add_learner( &self, - leader: C::NodeId, - target: C::NodeId, - ) -> Result, ClientWriteError> { + leader: MemNodeId, + target: MemNodeId, + ) -> Result, ClientWriteError> { let node = self.get_raft_handle(&leader).unwrap(); - node.add_learner(target, C::Node::default(), true).await.map_err(|e| e.into_api_error().unwrap()) + node.add_learner(target, (), true).await.map_err(|e| e.into_api_error().unwrap()) } /// Send a is_leader request to the target node. - pub async fn is_leader(&self, target: C::NodeId) -> Result<(), CheckIsLeaderError> { + pub async fn is_leader(&self, target: MemNodeId) -> Result<(), CheckIsLeaderError> { let node = { let rt = self.routing_table.lock().unwrap(); rt.get(&target).unwrap_or_else(|| panic!("node with ID {} does not exist", target)).clone() @@ -549,12 +533,12 @@ where /// Send a client request to the target node, causing test failure on error. 
pub async fn client_request( &self, - mut target: C::NodeId, + mut target: MemNodeId, client_id: &str, serial: u64, - ) -> Result<(), RaftError>> { + ) -> Result<(), RaftError>> { for ith in 0..3 { - let req = >::make_request(client_id, serial); + let req = ClientRequest::make_request(client_id, serial); if let Err(err) = self.send_client_request(target, req).await { tracing::error!({error=%err}, "error from client request"); @@ -587,10 +571,10 @@ where /// Send external request to the particular node. pub fn external_request< - F: FnOnce(&RaftState, &mut S, &mut TypedRaftRouter) + Send + 'static, + F: FnOnce(&RaftState, &mut MemLogStore, &mut TypedRaftRouter) + Send + 'static, >( &self, - target: C::NodeId, + target: MemNodeId, req: F, ) { let rt = self.routing_table.lock().unwrap(); @@ -601,7 +585,7 @@ where } /// Request the current leader from the target node. - pub async fn current_leader(&self, target: C::NodeId) -> Option { + pub async fn current_leader(&self, target: MemNodeId) -> Option { let node = self.get_raft_handle(&target).unwrap(); node.current_leader().await } @@ -610,10 +594,10 @@ where /// Returns the number of log written to raft. pub async fn client_request_many( &self, - target: C::NodeId, + target: MemNodeId, client_id: &str, count: usize, - ) -> Result>> { + ) -> Result>> { for idx in 0..count { self.client_request(target, client_id, idx as u64).await?; } @@ -623,9 +607,9 @@ where pub async fn send_client_request( &self, - target: C::NodeId, - req: C::D, - ) -> Result>> { + target: MemNodeId, + req: ClientRequest, + ) -> Result>> { let node = { let rt = self.routing_table.lock().unwrap(); rt.get(&target) @@ -771,12 +755,13 @@ where /// Assert against the state of the storage system one node in the cluster. pub async fn assert_storage_state_with_sto( &self, - storage: &mut S, - id: &C::NodeId, + storage: &mut MemLogStore, + sm: &mut MemStateMachine, + id: &MemNodeId, expect_term: u64, expect_last_log: u64, - expect_voted_for: Option, - expect_sm_last_applied_log: LogId, + expect_voted_for: Option, + expect_sm_last_applied_log: LogId, expect_snapshot: &Option<(ValueTest, u64)>, ) -> anyhow::Result<()> { let last_log_id = storage.get_log_state().await?.last_log_id; @@ -813,7 +798,7 @@ where } if let Some((index_test, term)) = &expect_snapshot { - let snap = storage + let snap = sm .get_current_snapshot() .await .map_err(|err| panic!("{}", err)) @@ -848,7 +833,7 @@ where ); } - let (last_applied, _) = storage.last_applied_state().await?; + let (last_applied, _) = sm.applied_state().await?; assert_eq!( &last_applied, @@ -867,8 +852,8 @@ where &self, expect_term: u64, expect_last_log: u64, - expect_voted_for: Option, - expect_sm_last_applied_log: LogId, + expect_voted_for: Option, + expect_sm_last_applied_log: LogId, expect_snapshot: Option<(ValueTest, u64)>, ) -> anyhow::Result<()> { let node_ids = { @@ -878,10 +863,11 @@ where }; for id in node_ids { - let mut storage = self.get_storage_handle(&id)?; + let (mut storage, mut sm) = self.get_storage_handle(&id)?; self.assert_storage_state_with_sto( &mut storage, + &mut sm, &id, expect_term, expect_last_log, @@ -896,7 +882,7 @@ where } #[tracing::instrument(level = "debug", skip(self))] - pub fn check_reachable(&self, id: C::NodeId, target: C::NodeId) -> Result<(), NetworkError> { + pub fn check_reachable(&self, id: MemNodeId, target: MemNodeId) -> Result<(), NetworkError> { let isolated = self.isolated_nodes.lock().unwrap(); if isolated.contains(&target) || isolated.contains(&id) { @@ -909,15 +895,10 @@ where } 
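Because the router now tracks a `(MemRaft, MemLogStore, MemStateMachine)` triple per node, tests read the log through one handle and the applied state through the other. A rough sketch of that access pattern (not from this patch; `inspect_node_0` is a hypothetical helper and assumes node 0 exists):

```rust
use anyhow::Result;
use openraft::storage::RaftLogStorage;
use openraft::storage::RaftStateMachine;

use crate::fixtures::RaftRouter;

/// Hypothetical helper: inspect node 0 through the two storage handles
/// returned by `get_storage_handle()`.
async fn inspect_node_0(router: &RaftRouter) -> Result<()> {
    let (mut store, mut sm) = router.get_storage_handle(&0)?;

    // Log-related state comes from the RaftLogStorage handle.
    let last_log_id = store.get_log_state().await?.last_log_id;

    // Applied state and membership come from the RaftStateMachine handle.
    let (last_applied, membership) = sm.applied_state().await?;

    println!(
        "last_log_id={:?} last_applied={:?} membership={:?}",
        last_log_id, last_applied, membership
    );
    Ok(())
}
```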
#[async_trait] -impl> RaftNetworkFactory for TypedRaftRouter -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ - type Network = RaftRouterNetwork; - - async fn new_client(&mut self, target: C::NodeId, _node: &C::Node) -> Self::Network { +impl RaftNetworkFactory for TypedRaftRouter { + type Network = RaftRouterNetwork; + + async fn new_client(&mut self, target: MemNodeId, _node: &()) -> Self::Network { RaftRouterNetwork { target, owner: self.clone(), @@ -925,28 +906,18 @@ where } } -pub struct RaftRouterNetwork> -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ - target: C::NodeId, - owner: TypedRaftRouter, +pub struct RaftRouterNetwork { + target: MemNodeId, + owner: TypedRaftRouter, } #[async_trait] -impl> RaftNetwork for RaftRouterNetwork -where - C::D: Debug + IntoMemClientRequest, - C::R: Debug, - S: Default + Clone, -{ +impl RaftNetwork for RaftRouterNetwork { /// Send an AppendEntries RPC to the target Raft node (§5). async fn send_append_entries( &mut self, - rpc: AppendEntriesRequest, - ) -> Result, RPCError>> { + rpc: AppendEntriesRequest, + ) -> Result, RPCError>> { tracing::debug!("append_entries to id={} {}", self.target, rpc.summary()); self.owner.check_reachable(rpc.vote.leader_id().voted_for().unwrap(), self.target)?; self.owner.rand_send_delay().await; @@ -963,11 +934,9 @@ where /// Send an InstallSnapshot RPC to the target Raft node (§7). async fn send_install_snapshot( &mut self, - rpc: InstallSnapshotRequest, - ) -> Result< - InstallSnapshotResponse, - RPCError>, - > { + rpc: InstallSnapshotRequest, + ) -> Result, RPCError>> + { self.owner.check_reachable(rpc.vote.leader_id().voted_for().unwrap(), self.target)?; self.owner.rand_send_delay().await; @@ -981,8 +950,8 @@ where /// Send a RequestVote RPC to the target Raft node (§5). 
async fn send_vote( &mut self, - rpc: VoteRequest, - ) -> Result, RPCError>> { + rpc: VoteRequest, + ) -> Result, RPCError>> { self.owner.check_reachable(rpc.vote.leader_id().voted_for().unwrap(), self.target)?; self.owner.rand_send_delay().await; diff --git a/tests/tests/life_cycle/t20_initialization.rs b/tests/tests/life_cycle/t20_initialization.rs index 66feb4783..23cf3f83b 100644 --- a/tests/tests/life_cycle/t20_initialization.rs +++ b/tests/tests/life_cycle/t20_initialization.rs @@ -7,15 +7,15 @@ use maplit::btreeset; use openraft::error::InitializeError; use openraft::error::NotAllowed; use openraft::error::NotInMembers; +use openraft::storage::RaftLogReaderExt; +use openraft::storage::RaftStateMachine; use openraft::CommittedLeaderId; use openraft::Config; use openraft::EffectiveMembership; use openraft::EntryPayload; use openraft::LogId; use openraft::Membership; -use openraft::RaftStorage; use openraft::ServerState; -use openraft::StorageHelper; use openraft::StoredMembership; use openraft::Vote; use tokio::sync::oneshot; @@ -113,8 +113,8 @@ async fn initialization() -> anyhow::Result<()> { } for i in [0, 1, 2] { - let mut sto = router.get_storage_handle(&1)?; - let first = StorageHelper::new(&mut sto).get_log_entries(0..2).await?.into_iter().next(); + let (mut sto, mut sm) = router.get_storage_handle(&1)?; + let first = sto.get_log_entries(0..2).await?.into_iter().next(); tracing::info!("--- check membership is replicated: id: {}, first log: {:?}", i, first); let mem = match first.unwrap().payload { @@ -125,7 +125,7 @@ async fn initialization() -> anyhow::Result<()> { }; assert_eq!(btreeset![0, 1, 2], mem.get_joint_config()[0].clone()); - let sm_mem = sto.last_applied_state().await?.1; + let sm_mem = sm.applied_state().await?.1; assert_eq!( StoredMembership::new( Some(LogId::new(CommittedLeaderId::new(0, 0), 0)), diff --git a/tests/tests/life_cycle/t20_shutdown.rs b/tests/tests/life_cycle/t20_shutdown.rs index 638af381f..12bffbcf5 100644 --- a/tests/tests/life_cycle/t20_shutdown.rs +++ b/tests/tests/life_cycle/t20_shutdown.rs @@ -26,7 +26,7 @@ async fn shutdown() -> Result<()> { tracing::info!("--- performing node shutdowns"); { for i in [0, 1, 2] { - let (node, _) = router.remove_node(i).unwrap(); + let (node, _, _) = router.remove_node(i).unwrap(); node.shutdown().await?; let m = node.metrics(); assert_eq!(ServerState::Shutdown, m.borrow().state, "shutdown node-{}", i); diff --git a/tests/tests/life_cycle/t30_follower_restart_does_not_interrupt.rs b/tests/tests/life_cycle/t30_follower_restart_does_not_interrupt.rs index 6fc6c9aba..a21ec4965 100644 --- a/tests/tests/life_cycle/t30_follower_restart_does_not_interrupt.rs +++ b/tests/tests/life_cycle/t30_follower_restart_does_not_interrupt.rs @@ -33,19 +33,19 @@ async fn follower_restart_does_not_interrupt() -> anyhow::Result<()> { let m = router.get_metrics(&0)?; let term = m.current_term; - let (n2, sto2) = router.remove_node(2).unwrap(); + let (n2, sto2, sm2) = router.remove_node(2).unwrap(); n2.shutdown().await?; - let (n1, sto1) = router.remove_node(1).unwrap(); + let (n1, sto1, sm1) = router.remove_node(1).unwrap(); n1.shutdown().await?; - let (n0, _sto0) = router.remove_node(0).unwrap(); + let (n0, _sto0, _sm0) = router.remove_node(0).unwrap(); n0.shutdown().await?; tracing::info!("--- restart node 1,2"); - router.new_raft_node_with_sto(1, sto1).await; - router.new_raft_node_with_sto(2, sto2).await; + router.new_raft_node_with_sto(1, sto1, sm1).await; + router.new_raft_node_with_sto(2, sto2, sm2).await; let res = router 
.wait(&1, Some(Duration::from_millis(1_000))) .metrics(|x| x.current_term > term, "node increase term to start election") diff --git a/tests/tests/life_cycle/t30_single_follower_restart.rs b/tests/tests/life_cycle/t30_single_follower_restart.rs index c76e75d1e..cbb5e0f71 100644 --- a/tests/tests/life_cycle/t30_single_follower_restart.rs +++ b/tests/tests/life_cycle/t30_single_follower_restart.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use std::time::Duration; use maplit::btreeset; +use openraft::storage::RaftLogStorage; use openraft::Config; -use openraft::RaftStorage; use openraft::ServerState; use openraft::Vote; @@ -39,7 +39,7 @@ async fn single_follower_restart() -> anyhow::Result<()> { tracing::info!("--- stop and restart node-0"); { - let (node, mut sto) = router.remove_node(0).unwrap(); + let (node, mut sto, sm) = router.remove_node(0).unwrap(); node.shutdown().await?; let v = sto.read_vote().await?.unwrap_or_default(); @@ -48,7 +48,7 @@ async fn single_follower_restart() -> anyhow::Result<()> { tracing::info!("--- restart node-0"); - router.new_raft_node_with_sto(0, sto).await; + router.new_raft_node_with_sto(0, sto, sm).await; router .wait(&0, Some(Duration::from_millis(1_000))) .state(ServerState::Leader, "single node restarted an became leader quickly") diff --git a/tests/tests/life_cycle/t90_issue_607_single_restart.rs b/tests/tests/life_cycle/t90_issue_607_single_restart.rs index 5f3a3a116..ab0046671 100644 --- a/tests/tests/life_cycle/t90_issue_607_single_restart.rs +++ b/tests/tests/life_cycle/t90_issue_607_single_restart.rs @@ -35,12 +35,12 @@ async fn single_restart() -> anyhow::Result<()> { tracing::info!("--- stop and restart node 0"); { - let (node, sto) = router.remove_node(0).unwrap(); + let (node, sto, sm) = router.remove_node(0).unwrap(); node.shutdown().await?; tracing::info!("--- restart node 0"); - router.new_raft_node_with_sto(0, sto).await; + router.new_raft_node_with_sto(0, sto, sm).await; } tracing::info!("--- write to 1 log after restart"); diff --git a/tests/tests/log_compaction/t10_compaction.rs b/tests/tests/log_compaction/t10_compaction.rs index 6e63b45f6..36494a364 100644 --- a/tests/tests/log_compaction/t10_compaction.rs +++ b/tests/tests/log_compaction/t10_compaction.rs @@ -5,6 +5,8 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; use openraft::raft::AppendEntriesRequest; +use openraft::storage::RaftLogReaderExt; +use openraft::testing; use openraft::CommittedLeaderId; use openraft::Config; use openraft::Entry; @@ -13,10 +15,8 @@ use openraft::LogId; use openraft::Membership; use openraft::RaftNetwork; use openraft::RaftNetworkFactory; -use openraft::RaftStorage; use openraft::ServerState; use openraft::SnapshotPolicy; -use openraft::StorageHelper; use openraft::Vote; use crate::fixtures::blank; @@ -93,14 +93,14 @@ async fn compaction() -> Result<()> { .await?; // Add a new node and assert that it received the same snapshot. - let mut sto1 = router.new_store(); - sto1.append_to_log([blank(0, 0), Entry { + let (mut sto1, sm1) = router.new_store(); + testing::blocking_append(&mut sto1, [blank(0, 0), Entry { log_id: LogId::new(CommittedLeaderId::new(1, 0), 1), payload: EntryPayload::Membership(Membership::new(vec![btreeset! 
{0}], None)), }]) .await?; - router.new_raft_node_with_sto(1, sto1.clone()).await; + router.new_raft_node_with_sto(1, sto1.clone(), sm1.clone()).await; router.add_learner(0, 1).await.expect("failed to add new node as learner"); log_index += 1; // add_learner log @@ -114,8 +114,8 @@ async fn compaction() -> Result<()> { tracing::info!("--- logs should be deleted after installing snapshot; left only the last one"); { - let mut sto = router.get_storage_handle(&1)?; - let logs = StorageHelper::new(&mut sto).get_log_entries(..).await?; + let (mut sto, _sm) = router.get_storage_handle(&1)?; + let logs = sto.get_log_entries(..).await?; assert_eq!(2, logs.len()); assert_eq!(LogId::new(CommittedLeaderId::new(1, 0), log_index - 1), logs[0].log_id) } diff --git a/tests/tests/membership/t00_learner_restart.rs b/tests/tests/membership/t00_learner_restart.rs index 5461a6d05..f14dfa2af 100644 --- a/tests/tests/membership/t00_learner_restart.rs +++ b/tests/tests/membership/t00_learner_restart.rs @@ -41,14 +41,14 @@ async fn learner_restart() -> Result<()> { router.wait_for_log(&btreeset![0, 1], Some(log_index), None, "write one log").await?; - let (node0, _sto0) = router.remove_node(0).unwrap(); + let (node0, _sto0, _sm0) = router.remove_node(0).unwrap(); node0.shutdown().await?; - let (node1, sto1) = router.remove_node(1).unwrap(); + let (node1, sto1, sm1) = router.remove_node(1).unwrap(); node1.shutdown().await?; // restart node-1, assert the state as expected. - let restarted = Raft::new(1, config.clone(), router.clone(), sto1).await?; + let restarted = Raft::new(1, config.clone(), router.clone(), sto1, sm1).await?; restarted.wait(timeout()).log(Some(log_index), "log after restart").await?; restarted.wait(timeout()).state(ServerState::Learner, "server state after restart").await?; diff --git a/tests/tests/membership/t10_add_learner.rs b/tests/tests/membership/t10_add_learner.rs index e85c97ec6..37b86b25e 100644 --- a/tests/tests/membership/t10_add_learner.rs +++ b/tests/tests/membership/t10_add_learner.rs @@ -6,6 +6,7 @@ use maplit::btreeset; use openraft::error::ChangeMembershipError; use openraft::error::ClientWriteError; use openraft::error::InProgress; +use openraft::storage::RaftLogReaderExt; use openraft::CommittedLeaderId; use openraft::Config; use openraft::LogId; @@ -65,9 +66,9 @@ async fn add_learner_basic() -> Result<()> { tracing::info!("--- add_learner blocks until the replication catches up"); { - let mut sto1 = router.get_storage_handle(&1)?; + let (mut sto1, _sm1) = router.get_storage_handle(&1)?; - let logs = StorageHelper::new(&mut sto1).get_log_entries(..).await?; + let logs = sto1.get_log_entries(..).await?; assert_eq!(log_index, logs[logs.len() - 1].log_id.index); // 0-th log @@ -252,8 +253,8 @@ async fn check_learner_after_leader_transferred() -> Result<()> { tracing::info!("--- check new cluster membership"); { - let mut sto1 = router.get_storage_handle(&1)?; - let m = StorageHelper::new(&mut sto1).get_membership().await?; + let (mut sto1, mut sm1) = router.get_storage_handle(&1)?; + let m = StorageHelper::new(&mut sto1, &mut sm1).get_membership().await?; // new membership is applied, thus get_membership() only returns one entry. 
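The compaction and add-learner hunks above show the two call-site changes that recur throughout this patch: plain log reads move from `StorageHelper` to the new `RaftLogReaderExt` methods on the log store, and `StorageHelper` itself is now constructed from both halves of the storage. A condensed sketch of that pattern follows, generic over the new traits; the function name is illustrative, the bounds are an assumption, and only the method calls already used in the tests above are exercised.

```rust
use openraft::storage::{RaftLogReaderExt, RaftLogStorage, RaftStateMachine};
use openraft::{RaftLogReader, RaftTypeConfig, StorageHelper};

/// Sketch only: the post-split read pattern used by the membership tests.
async fn read_logs_and_membership<C, LS, SM>(log_store: &mut LS, sm: &mut SM) -> anyhow::Result<()>
where
    C: RaftTypeConfig,
    LS: RaftLogStorage<C> + RaftLogReader<C>,
    SM: RaftStateMachine<C>,
{
    // Log access needs only the log store; get_log_entries() is provided by RaftLogReaderExt.
    let logs = log_store.get_log_entries(..).await?;
    println!("{} log entries", logs.len());

    // StorageHelper spans both components, so it now borrows the pair.
    let membership = StorageHelper::new(log_store, sm).get_membership().await?;
    println!("membership state: {:?}", membership);

    Ok(())
}
```

In the fixture this corresponds to the `(sto, sm)` pair returned by `get_storage_handle(&node_id)`.
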
diff --git a/tests/tests/membership/t20_change_membership.rs b/tests/tests/membership/t20_change_membership.rs index 27a973e7a..393bab944 100644 --- a/tests/tests/membership/t20_change_membership.rs +++ b/tests/tests/membership/t20_change_membership.rs @@ -4,10 +4,10 @@ use std::time::Duration; use maplit::btreeset; use openraft::error::ChangeMembershipError; use openraft::error::ClientWriteError; +use openraft::storage::RaftLogReaderExt; use openraft::Config; use openraft::LogIdOptionExt; use openraft::ServerState; -use openraft::StorageHelper; use crate::fixtures::init_default_ut_tracing; use crate::fixtures::RaftRouter; @@ -86,8 +86,8 @@ async fn change_with_new_learner_blocking() -> anyhow::Result<()> { tracing::info!("--- change_membership blocks until success: {:?}", res); for node_id in 0..2 { - let mut sto = router.get_storage_handle(&node_id)?; - let logs = StorageHelper::new(&mut sto).get_log_entries(..).await?; + let (mut sto, _sm) = router.get_storage_handle(&node_id)?; + let logs = sto.get_log_entries(..).await?; assert_eq!(log_index, logs[logs.len() - 1].log_id.index, "node: {}", node_id); // 0-th log assert_eq!(log_index + 1, logs.len() as u64, "node: {}", node_id); diff --git a/tests/tests/membership/t99_new_leader_auto_commit_uniform_config.rs b/tests/tests/membership/t99_new_leader_auto_commit_uniform_config.rs index 02e4e6ab9..f17265889 100644 --- a/tests/tests/membership/t99_new_leader_auto_commit_uniform_config.rs +++ b/tests/tests/membership/t99_new_leader_auto_commit_uniform_config.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use anyhow::Result; use maplit::btreeset; +use openraft::testing; use openraft::Config; use openraft::Entry; use openraft::EntryPayload; use openraft::Membership; use openraft::Raft; -use openraft::RaftStorage; use crate::fixtures::init_default_ut_tracing; use crate::fixtures::log_id; @@ -35,11 +35,11 @@ async fn new_leader_auto_commit_uniform_config() -> Result<()> { let mut log_index = router.new_cluster(btreeset! {0}, btreeset! {}).await?; - let mut sto = router.get_storage_handle(&0)?; + let (mut sto, sm) = router.get_storage_handle(&0)?; router.remove_node(0); { - sto.append_to_log([Entry { + testing::blocking_append(&mut sto, [Entry { log_id: log_id(1, 0, log_index + 1), payload: EntryPayload::Membership(Membership::new( vec![btreeset! {0}, btreeset! 
{0,1,2}], @@ -58,7 +58,7 @@ async fn new_leader_auto_commit_uniform_config() -> Result<()> { router.new_raft_node(1).await; router.new_raft_node(2).await; - let node = Raft::new(0, config.clone(), router.clone(), sto.clone()); + let node = Raft::new(0, config.clone(), router.clone(), sto.clone(), sm.clone()); let _ = node; diff --git a/tests/tests/metrics/t20_metrics_state_machine_consistency.rs b/tests/tests/metrics/t20_metrics_state_machine_consistency.rs index 7088aabcd..284a03942 100644 --- a/tests/tests/metrics/t20_metrics_state_machine_consistency.rs +++ b/tests/tests/metrics/t20_metrics_state_machine_consistency.rs @@ -56,8 +56,8 @@ async fn metrics_state_machine_consistency() -> Result<()> { log_index += 1; for node_id in 0..2 { router.wait_for_log(&btreeset![node_id], Some(log_index), None, "write one log").await?; - let sto = router.get_storage_handle(&node_id)?; - assert!(sto.get_state_machine().await.client_status.get("foo").is_some()); + let (sto, _sm) = router.get_storage_handle(&node_id)?; + assert!(sto.storage().await.get_state_machine().await.client_status.get("foo").is_some()); } Ok(()) diff --git a/tests/tests/snapshot/t23_snapshot_chunk_size.rs b/tests/tests/snapshot/t23_snapshot_chunk_size.rs index faeea3f1c..0c1145f5b 100644 --- a/tests/tests/snapshot/t23_snapshot_chunk_size.rs +++ b/tests/tests/snapshot/t23_snapshot_chunk_size.rs @@ -110,10 +110,11 @@ async fn snapshot_chunk_size() -> Result<()> { // after add_learner, log_index + 1, // leader has only log_index log in snapshot, cause it has compacted before add_learner - let mut store = router.get_storage_handle(&0)?; + let (mut store, mut sm) = router.get_storage_handle(&0)?; router .assert_storage_state_with_sto( &mut store, + &mut sm, &0, 1, log_index, @@ -125,10 +126,11 @@ async fn snapshot_chunk_size() -> Result<()> { // learner has log_index + 1 log in snapshot, cause it do compact after add_learner, // so learner's snapshot include add_learner log - let mut store = router.get_storage_handle(&1)?; + let (mut store, mut sm) = router.get_storage_handle(&1)?; router .assert_storage_state_with_sto( &mut store, + &mut sm, &1, 1, log_index, diff --git a/tests/tests/snapshot/t40_purge_in_snapshot_logs.rs b/tests/tests/snapshot/t40_purge_in_snapshot_logs.rs index d218848fe..dd23fedae 100644 --- a/tests/tests/snapshot/t40_purge_in_snapshot_logs.rs +++ b/tests/tests/snapshot/t40_purge_in_snapshot_logs.rs @@ -35,7 +35,7 @@ async fn purge_in_snapshot_logs() -> Result<()> { let leader = router.get_raft_handle(&0)?; let learner = router.get_raft_handle(&1)?; - let mut sto0 = router.get_storage_handle(&0)?; + let (mut sto0, mut _sm0) = router.get_storage_handle(&0)?; tracing::info!("--- build snapshot on leader, check purged log"); { @@ -48,7 +48,7 @@ async fn purge_in_snapshot_logs() -> Result<()> { "building 1st snapshot", ) .await?; - let mut sto0 = router.get_storage_handle(&0)?; + let (mut sto0, mut _sm0) = router.get_storage_handle(&0)?; // Wait for purge to complete. 
sleep(Duration::from_millis(500)).await; @@ -92,7 +92,7 @@ async fn purge_in_snapshot_logs() -> Result<()> { ) .await?; - let mut sto1 = router.get_storage_handle(&1)?; + let (mut sto1, mut _sm) = router.get_storage_handle(&1)?; let logs = sto1.try_get_log_entries(..).await?; assert_eq!(0, logs.len()); } diff --git a/tests/tests/snapshot/t41_snapshot_overrides_membership.rs b/tests/tests/snapshot/t41_snapshot_overrides_membership.rs index 592e239b3..e76090d6c 100644 --- a/tests/tests/snapshot/t41_snapshot_overrides_membership.rs +++ b/tests/tests/snapshot/t41_snapshot_overrides_membership.rs @@ -85,7 +85,7 @@ async fn snapshot_overrides_membership() -> Result<()> { { tracing::info!("--- create learner"); router.new_raft_node(1).await; - let mut sto = router.get_storage_handle(&1)?; + let (mut sto, mut sm) = router.get_storage_handle(&1)?; tracing::info!("--- add a membership config log to the learner"); { @@ -102,7 +102,7 @@ async fn snapshot_overrides_membership() -> Result<()> { tracing::info!("--- check that learner membership is affected"); { - let m = StorageHelper::new(&mut sto).get_membership().await?; + let m = StorageHelper::new(&mut sto, &mut sm).get_membership().await?; assert_eq!(&EffectiveMembership::default(), m.committed().as_ref()); assert_eq!( @@ -143,7 +143,7 @@ async fn snapshot_overrides_membership() -> Result<()> { ) .await?; - let m = StorageHelper::new(&mut sto).get_membership().await?; + let m = StorageHelper::new(&mut sto, &mut sm).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {0}], Some(btreeset! {1})), diff --git a/tests/tests/snapshot/t42_snapshot_uses_prev_snap_membership.rs b/tests/tests/snapshot/t42_snapshot_uses_prev_snap_membership.rs index 289f69500..4526f78f8 100644 --- a/tests/tests/snapshot/t42_snapshot_uses_prev_snap_membership.rs +++ b/tests/tests/snapshot/t42_snapshot_uses_prev_snap_membership.rs @@ -4,6 +4,7 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; +use openraft::storage::RaftLogReaderExt; use openraft::CommittedLeaderId; use openraft::Config; use openraft::LogId; @@ -47,7 +48,7 @@ async fn snapshot_uses_prev_snap_membership() -> Result<()> { let mut log_index = router.new_cluster(btreeset! {0,1}, btreeset! {}).await?; - let mut sto0 = router.get_storage_handle(&0)?; + let (mut sto0, mut sm0) = router.get_storage_handle(&0)?; tracing::info!("--- send just enough logs to trigger snapshot"); { @@ -72,10 +73,10 @@ async fn snapshot_uses_prev_snap_membership() -> Result<()> { .await?; { - let logs = StorageHelper::new(&mut sto0).get_log_entries(..).await?; + let logs = sto0.get_log_entries(..).await?; assert_eq!(3, logs.len(), "only one applied log is kept"); } - let m = StorageHelper::new(&mut sto0).get_membership().await?; + let m = StorageHelper::new(&mut sto0, &mut sm0).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! {0,1}], None), @@ -108,10 +109,10 @@ async fn snapshot_uses_prev_snap_membership() -> Result<()> { tracing::info!("--- check membership"); { { - let logs = StorageHelper::new(&mut sto0).get_log_entries(..).await?; + let logs = sto0.get_log_entries(..).await?; assert_eq!(3, logs.len(), "only one applied log"); } - let m = StorageHelper::new(&mut sto0).get_membership().await?; + let m = StorageHelper::new(&mut sto0, &mut sm0).get_membership().await?; assert_eq!( &Membership::new(vec![btreeset! 
{0,1}], None), diff --git a/tests/tests/snapshot/t43_snapshot_delete_conflict_logs.rs b/tests/tests/snapshot/t43_snapshot_delete_conflict_logs.rs index 262fb551f..087861968 100644 --- a/tests/tests/snapshot/t43_snapshot_delete_conflict_logs.rs +++ b/tests/tests/snapshot/t43_snapshot_delete_conflict_logs.rs @@ -6,6 +6,9 @@ use anyhow::Result; use maplit::btreeset; use openraft::raft::AppendEntriesRequest; use openraft::raft::InstallSnapshotRequest; +use openraft::storage::RaftLogStorage; +use openraft::storage::RaftStateMachine; +use openraft::testing; use openraft::CommittedLeaderId; use openraft::Config; use openraft::Entry; @@ -16,7 +19,6 @@ use openraft::RaftLogReader; use openraft::RaftNetwork; use openraft::RaftNetworkFactory; use openraft::RaftSnapshotBuilder; -use openraft::RaftStorage; use openraft::ServerState; use openraft::SnapshotPolicy; use openraft::StorageHelper; @@ -54,18 +56,18 @@ async fn snapshot_delete_conflicting_logs() -> Result<()> { tracing::info!("--- manually init node-0 with a higher vote, in order to override conflict log on learner later"); { - let mut sto0 = router.new_store(); + let (mut sto0, sm0) = router.new_store(); // When the node starts, it will become candidate and increment its vote to (5,0) sto0.save_vote(&Vote::new(4, 0)).await?; - sto0.append_to_log([ + testing::blocking_append(&mut sto0, [ // manually insert the initializing log membership_ent(0, 0, 0, vec![btreeset! {0}]), ]) .await?; log_index = 1; - router.new_raft_node_with_sto(0, sto0).await; + router.new_raft_node_with_sto(0, sto0, sm0).await; router.wait(&0, timeout()).state(ServerState::Leader, "init node-0 server-state").await?; router.wait(&0, timeout()).log(Some(log_index), "init node-0 log").await?; @@ -118,8 +120,8 @@ async fn snapshot_delete_conflicting_logs() -> Result<()> { tracing::info!("--- check that learner membership is affected"); { - let mut sto1 = router.get_storage_handle(&1)?; - let m = StorageHelper::new(&mut sto1).get_membership().await?; + let (mut sto1, mut sm1) = router.get_storage_handle(&1)?; + let m = StorageHelper::new(&mut sto1, &mut sm1).get_membership().await?; tracing::info!("got membership of node-1: {:?}", m); assert_eq!( @@ -135,10 +137,10 @@ async fn snapshot_delete_conflicting_logs() -> Result<()> { tracing::info!("--- manually build and install snapshot to node-1"); { - let mut sto0 = router.get_storage_handle(&0)?; + let (mut sto0, mut sm0) = router.get_storage_handle(&0)?; let snap = { - let mut b = sto0.get_snapshot_builder().await; + let mut b = sm0.get_snapshot_builder().await; b.build_snapshot().await? 
}; @@ -162,9 +164,9 @@ async fn snapshot_delete_conflicting_logs() -> Result<()> { tracing::info!("--- check that learner membership is affected, conflict log are deleted"); { - let mut sto1 = router.get_storage_handle(&1)?; + let (mut sto1, mut sm1) = router.get_storage_handle(&1)?; - let m = StorageHelper::new(&mut sto1).get_membership().await?; + let m = StorageHelper::new(&mut sto1, &mut sm1).get_membership().await?; tracing::info!("got membership of node-1: {:?}", m); assert_eq!( diff --git a/tests/tests/snapshot/t44_replication_does_not_block_purge.rs b/tests/tests/snapshot/t44_replication_does_not_block_purge.rs index 2d572e540..88b6065fc 100644 --- a/tests/tests/snapshot/t44_replication_does_not_block_purge.rs +++ b/tests/tests/snapshot/t44_replication_does_not_block_purge.rs @@ -54,7 +54,7 @@ async fn replication_does_not_block_purge() -> Result<()> { sleep(Duration::from_millis(500)).await; - let mut sto0 = router.get_storage_handle(&0)?; + let (mut sto0, mut _sm0) = router.get_storage_handle(&0)?; let logs = sto0.try_get_log_entries(..).await?; assert_eq!(max_keep as usize, logs.len(), "leader's local logs are purged"); } diff --git a/tests/tests/state_machine/t10_total_order_apply.rs b/tests/tests/state_machine/t10_total_order_apply.rs index e9619a6a7..de6e40149 100644 --- a/tests/tests/state_machine/t10_total_order_apply.rs +++ b/tests/tests/state_machine/t10_total_order_apply.rs @@ -3,9 +3,9 @@ use std::time::Duration; use anyhow::Result; use maplit::btreeset; +use openraft::storage::RaftStateMachine; use openraft::Config; use openraft::LogIdOptionExt; -use openraft::RaftStorage; use openraft::ServerState; use tokio::sync::watch; @@ -42,7 +42,7 @@ async fn total_order_apply() -> Result<()> { let (tx, rx) = watch::channel(false); - let mut sto1 = router.get_storage_handle(&1)?; + let (_sto1, mut sm1) = router.get_storage_handle(&1)?; let mut prev = None; let h = tokio::spawn(async move { @@ -51,7 +51,7 @@ async fn total_order_apply() -> Result<()> { break; } - let (last, _) = sto1.last_applied_state().await.unwrap(); + let (last, _) = sm1.applied_state().await.unwrap(); if last.index() < prev { panic!("out of order apply"); diff --git a/tests/tests/state_machine/t20_state_machine_apply_membership.rs b/tests/tests/state_machine/t20_state_machine_apply_membership.rs index 245fa56a3..11d19ca89 100644 --- a/tests/tests/state_machine/t20_state_machine_apply_membership.rs +++ b/tests/tests/state_machine/t20_state_machine_apply_membership.rs @@ -5,12 +5,12 @@ use std::time::Duration; use anyhow::Result; use futures::stream::StreamExt; use maplit::btreeset; +use openraft::storage::RaftStateMachine; use openraft::CommittedLeaderId; use openraft::Config; use openraft::LogId; use openraft::LogIdOptionExt; use openraft::Membership; -use openraft::RaftStorage; use openraft::ServerState; use openraft::StoredMembership; @@ -53,13 +53,13 @@ async fn state_machine_apply_membership() -> Result<()> { router.assert_stable_cluster(Some(1), Some(log_index)); for i in 0..=0 { - let mut sto = router.get_storage_handle(&i)?; + let (_sto, mut sm) = router.get_storage_handle(&i)?; assert_eq!( StoredMembership::new( Some(LogId::new(CommittedLeaderId::new(0, 0), 0)), Membership::new(vec![btreeset! 
{0}], None) ), - sto.last_applied_state().await?.1 + sm.applied_state().await?.1 ); } @@ -102,8 +102,8 @@ async fn state_machine_apply_membership() -> Result<()> { .metrics(|x| x.last_applied.index() == Some(log_index), "uniform log applied") .await?; - let mut sto = router.get_storage_handle(&i)?; - let (_, last_membership) = sto.last_applied_state().await?; + let (_sto, mut sm) = router.get_storage_handle(&i)?; + let (_, last_membership) = sm.applied_state().await?; assert_eq!( StoredMembership::new( Some(LogId::new(CommittedLeaderId::new(1, 0), log_index)),