diff --git a/applications/tari_base_node/proto/base_node.proto b/applications/tari_base_node/proto/base_node.proto
index 457c429705..f0a155d291 100644
--- a/applications/tari_base_node/proto/base_node.proto
+++ b/applications/tari_base_node/proto/base_node.proto
@@ -26,6 +26,7 @@ package tari.base_node;
 import "google/protobuf/wrappers.proto";
 import "google/protobuf/timestamp.proto";
+
 // The gRPC interface for interacting with the base node.
 service BaseNode {
     // Lists headers in the current best chain
@@ -33,13 +34,78 @@ service BaseNode {
     // Returns blocks in the current best chain. Currently only supports querying by height
     rpc GetBlocks(GetBlocksRequest) returns (stream HistoricalBlock);
     // Returns the calc timing for the chain heights
-    rpc GetCalcTiming(GetCalcTimingRequest) returns (CalcTimingResponse);
+    rpc GetCalcTiming(HeightRequest) returns (CalcTimingResponse);
+    // Returns the network Constants
+    rpc GetConstants(Empty) returns (ConsensusConstants);
+    // Returns Block Sizes
+    rpc GetBlockSize (BlockGroupRequest) returns (BlockGroupResponse);
+    // Returns Block Fees
+    rpc GetBlockFees (BlockGroupRequest) returns (BlockGroupResponse);
+    // Get Version
+    rpc GetVersion(Empty) returns (StringValue);
+    // Get coins in circulation
+    rpc GetTokensInCirculation(IntegerValue) returns (IntegerValue);
+    // Get network difficulties
+    rpc GetNetworkDifficulty(HeightRequest) returns (stream NetworkDifficultyResponse);
+}
+
+/// An Empty placeholder for endpoints without request parameters
+message Empty {}
+
+// Network difficulty response
+message NetworkDifficultyResponse {
+    uint64 difficulty = 1;
+    uint64 estimated_hash_rate = 2;
+    uint64 height = 3;
+}
+
+// A generic single value response for a specific height
+message ValueAtHeightResponse {
+    uint64 value = 1;
+    uint64 height = 2;
+}
+
+// A generic uint value
+message IntegerValue {
+    uint64 value = 1;
+}
+
+// A generic String value
+message StringValue {
+    string value = 1;
+}
+
+/// GetBlockSize / GetBlockFees Request
+/// Either the starting and ending heights OR the from_tip param must be specified
+message BlockGroupRequest {
+    // The height from the chain tip (optional)
+    uint64 from_tip = 1;
+    // The starting height (optional)
+    uint64 start_height = 2;
+    // The ending height (optional)
+    uint64 end_height = 3;
+    /// The type of calculation required (optional)
+    /// Defaults to median
+    /// median, mean, quartile, quantile
+    CalcType calc_type = 4;
+}
+
+/// GetBlockSize / GetBlockFees Response
+message BlockGroupResponse {
+    repeated double value = 1;
+    CalcType calc_type = 2;
 }
+enum CalcType {
+    MEAN = 0;
+    MEDIAN = 1;
+    QUANTILE = 2;
+    QUARTILE = 3;
 }
-// The request used for querying the calc timing from the base node.
+// The request used for querying a function that requires a height, either between 2 points or from the chain tip
 // If start_height and end_height are set and > 0, they take precedence, otherwise from_tip is used
-message GetCalcTimingRequest {
+message HeightRequest {
     // The height from the chain tip (optional)
     uint64 from_tip = 1;
     // The starting height (optional)
@@ -48,6 +114,37 @@
     uint64 end_height = 3;
 }
+/// Consensus Constants response
+message ConsensusConstants {
+    /// The min height maturity a coinbase utxo must have
+    uint64 coinbase_lock_height = 1;
+    /// Current version of the blockchain
+    uint32 blockchain_version = 2;
+    /// The Future Time Limit (FTL) of the blockchain in seconds. This is the max allowable timestamp that is accepted.
+    /// We use TxN/20 where T = target time = 60 seconds, and N = block_window = 150
+    uint64 future_time_limit = 3;
+    /// This is our target time in seconds between blocks
+    uint64 target_block_interval = 4;
+    /// When doing difficulty adjustments and FTL calculations this is the number of blocks we look at
+    uint64 difficulty_block_window = 5;
+    /// When doing difficulty adjustments, this is the maximum block time allowed
+    uint64 difficulty_max_block_interval = 6;
+    /// Maximum transaction weight used for the construction of new blocks.
+    uint64 max_block_transaction_weight = 7;
+    /// The number of PoW algorithms used by the Tari chain.
+    uint64 pow_algo_count = 8;
+    /// This is how many blocks we use to count towards the median timestamp to ensure the blockchain moves forward
+    uint64 median_timestamp_count = 9;
+    /// This is the initial emission curve amount
+    uint64 emission_initial = 10;
+    /// This is the emission curve delay
+    double emission_decay = 11;
+    /// This is the emission curve tail amount
+    uint64 emission_tail = 12;
+    /// This is the initial min difficulty for the difficulty adjustment
+    uint64 min_blake_pow_difficulty = 13;
+}
+
 // The return type of the rpc GetCalcTiming
 message CalcTimingResponse {
     uint64 max = 1;
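A minimal client-side sketch of the new endpoints defined above, assuming the same proto is compiled with tonic on the client. The `base_node_grpc` module name mirrors the server's `tonic::include_proto!` call, and the address/port is only a placeholder for whatever `grpc_address` the node is configured with:

pub mod base_node_grpc {
    tonic::include_proto!("tari.base_node");
}

use base_node_grpc::{base_node_client::BaseNodeClient, Empty, HeightRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address; use the node's configured grpc_address.
    let mut client = BaseNodeClient::connect("http://127.0.0.1:18142").await?;

    // Unary call: fetch the consensus constants.
    let constants = client.get_constants(Empty {}).await?.into_inner();
    println!("target block interval: {}s", constants.target_block_interval);

    // Server-streaming call: difficulty for the last 10 blocks from the tip.
    let mut stream = client
        .get_network_difficulty(HeightRequest { from_tip: 10, start_height: 0, end_height: 0 })
        .await?
        .into_inner();
    while let Some(item) = stream.message().await? {
        println!("height {}: difficulty {}", item.height, item.difficulty);
    }
    Ok(())
}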
diff --git a/applications/tari_base_node/src/grpc/blocks.rs b/applications/tari_base_node/src/grpc/blocks.rs
new file mode 100644
index 0000000000..8e952e922f
--- /dev/null
+++ b/applications/tari_base_node/src/grpc/blocks.rs
@@ -0,0 +1,75 @@
+// Copyright 2019. The Tari Project
+//
+// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
+// following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
+// disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
+// following disclaimer in the documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use tari_core::{base_node::LocalNodeCommsInterface, blocks::BlockHeader, chain_storage::HistoricalBlock};
+use tonic::Status;
+
+// The maximum number of blocks that can be requested at a time. These will be streamed to the
+// client, so memory is not really a concern here, but a malicious client could request a large
+// number here to keep the node busy
+pub const GET_BLOCKS_MAX_HEIGHTS: usize = 1000;
+
+// The number of blocks to request from the base node at a time. This is to reduce the number of
+// requests to the base node, but if you'd like to stream directly, this can be set to 1.
+pub const GET_BLOCKS_PAGE_SIZE: usize = 10;
+
+/// Magic number for input and output sizes
+pub const BLOCK_INPUT_SIZE: u64 = 4;
+pub const BLOCK_OUTPUT_SIZE: u64 = 13;
+
+/// Returns the block heights based on the start and end heights or from_tip
+pub async fn block_heights(
+    handler: LocalNodeCommsInterface,
+    start_height: u64,
+    end_height: u64,
+    from_tip: u64,
+) -> Result<Vec<u64>, Status>
+{
+    let heights = if start_height > 0 && end_height > 0 {
+        Ok(BlockHeader::get_height_range(start_height, end_height))
+    } else if from_tip > 0 {
+        BlockHeader::get_heights_from_tip(handler, from_tip)
+            .await
+            .map_err(|e| Status::internal(e.to_string()))
+    } else {
+        return Err(Status::invalid_argument("Invalid arguments provided"));
+    };
+    heights
+}
+
+pub fn block_size(block: &HistoricalBlock) -> u64 {
+    let body = block.clone().block.body;
+
+    let input_size = body.inputs().len() as u64 * BLOCK_INPUT_SIZE;
+    let output_size = body.outputs().len() as u64 * BLOCK_OUTPUT_SIZE;
+    input_size + output_size
+}
+
+pub fn block_fees(block: &HistoricalBlock) -> u64 {
+    let body = block.clone().block.body;
+    body.kernels()
+        .iter()
+        .map(|k| k.fee.into())
+        .collect::<Vec<u64>>()
+        .iter()
+        .sum::<u64>()
+}
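For illustration (not part of the patch): the two "magic number" constants make block_size a count-based weight rather than a byte count, e.g. a block with 2 inputs and 3 outputs is sized at 2 × 4 + 3 × 13 = 47.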
diff --git a/applications/tari_base_node/src/grpc/helpers.rs b/applications/tari_base_node/src/grpc/helpers.rs
new file mode 100644
index 0000000000..cba931b048
--- /dev/null
+++ b/applications/tari_base_node/src/grpc/helpers.rs
@@ -0,0 +1,72 @@
+// Copyright 2019. The Tari Project
+//
+// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
+// following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
+// disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
+// following disclaimer in the documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+pub fn median(mut list: Vec<u64>) -> Option<f64> {
+    if list.is_empty() {
+        return None;
+    }
+    list.sort();
+    let mid_index = list.len() / 2;
+    let median = if list.len() % 2 == 0 {
+        (list[mid_index - 1] + list[mid_index]) as f64 / 2.0
+    } else {
+        list[mid_index] as f64
+    };
+    Some(median)
+}
+
+pub fn mean(list: Vec<u64>) -> Option<f64> {
+    if list.is_empty() {
+        return None;
+    }
+    let mut count = 0;
+    let total = list.iter().inspect(|_| count += 1).sum::<u64>();
+    Some(total as f64 / count as f64)
+}
+/// TODO: Implement this function for the gRPC response
+pub fn quantile(_list: Vec<u64>) -> Option<f64> {
+    None
+}
+
+/// TODO: Implement this function for the gRPC response
+pub fn quartile(_list: Vec<u64>) -> Option<f64> {
+    None
+}
+
+#[cfg(test)]
+pub mod test {
+    use super::*;
+
+    #[test]
+    fn median() {
+        let mut values = vec![1u64, 8u64, 3u64, 9u64];
+        let median_value = super::median(values);
+        assert_eq!(median_value, Some(5.5f64))
+    }
+
+    #[test]
+    fn mean() {
+        let values = vec![1u64, 8u64, 3u64, 9u64];
+        let mean_value = super::mean(values);
+        assert_eq!(mean_value, Some(5.25f64))
+    }
+}
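The quantile()/quartile() stubs above are left unimplemented. One possible shape for quantile, shown only as a sketch (the linear-interpolation scheme and the extra `q` parameter are assumptions, not something this patch defines):

/// Sketch: value at quantile `q` (0.0..=1.0) using linear interpolation between ranks.
pub fn quantile_at(mut list: Vec<u64>, q: f64) -> Option<f64> {
    if list.is_empty() || !(0.0..=1.0).contains(&q) {
        return None;
    }
    list.sort();
    let pos = q * (list.len() - 1) as f64;
    let lower = pos.floor() as usize;
    let upper = pos.ceil() as usize;
    if lower == upper {
        return Some(list[lower] as f64);
    }
    let weight = pos - lower as f64;
    Some(list[lower] as f64 * (1.0 - weight) + list[upper] as f64 * weight)
}

With the test data used above, quantile_at(vec![1, 8, 3, 9], 0.5) returns Some(5.5), matching median().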
diff --git a/applications/tari_base_node/src/grpc/mod.rs b/applications/tari_base_node/src/grpc/mod.rs
new file mode 100644
index 0000000000..585614aba0
--- /dev/null
+++ b/applications/tari_base_node/src/grpc/mod.rs
@@ -0,0 +1,3 @@
+pub mod blocks;
+pub mod helpers;
+pub mod server;
diff --git a/applications/tari_base_node/src/grpc.rs b/applications/tari_base_node/src/grpc/server.rs
similarity index 57%
rename from applications/tari_base_node/src/grpc.rs
rename to applications/tari_base_node/src/grpc/server.rs
index 3a1151b964..96762c4e27
--- a/applications/tari_base_node/src/grpc.rs
+++ b/applications/tari_base_node/src/grpc/server.rs
@@ -19,21 +19,34 @@
 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+use crate::grpc::{
+    blocks::{block_fees, block_heights, block_size, GET_BLOCKS_MAX_HEIGHTS, GET_BLOCKS_PAGE_SIZE},
+    helpers::{mean, median},
+};
 use base_node_grpc::*;
 use log::*;
 use prost_types::Timestamp;
-use std::cmp;
+use std::{cmp, convert::TryFrom};
+use tari_common::GlobalConfig;
 use tari_core::{
     base_node::LocalNodeCommsInterface,
     blocks::{Block, BlockHeader},
     chain_storage::HistoricalBlock,
+    consensus::{emission::EmissionSchedule, ConsensusConstants, Network},
     proof_of_work::PowAlgorithm,
 };
 use tari_crypto::tari_utilities::{epoch_time::EpochTime, ByteArray, Hashable};
 use tokio::{runtime, sync::mpsc};
 use tonic::{Request, Response, Status};
+const VERSION: &'static str = env!("CARGO_PKG_VERSION");
+
 const LOG_TARGET: &str = "base_node::grpc";
+// The maximum number of difficulty ints that can be requested at a time. These will be streamed to the
+// client, so memory is not really a concern here, but a malicious client could request a large
+// number here to keep the node busy
+const GET_DIFFICULTY_MAX_HEIGHTS: usize = 10_000;
+const GET_DIFFICULTY_PAGE_SIZE: usize = 1_000;
 // The maximum number of headers a client can request at a time. If the client requests more than
 // this, this is the maximum that will be returned.
 const LIST_HEADERS_MAX_NUM_HEADERS: u64 = 10_000;
@@ -43,15 +56,6 @@ const LIST_HEADERS_PAGE_SIZE: usize = 10;
 // The `num_headers` value if none is provided.
 const LIST_HEADERS_DEFAULT_NUM_HEADERS: u64 = 10;
-// The maximum number of blocks that can be requested at a time. These will be streamed to the
-// client, so memory is not really a concern here, but a malicious client could request a large
-// number here to keep the node busy
-const GET_BLOCKS_MAX_HEIGHTS: usize = 1000;
-
-// The number of blocks to request from the base node at a time. This is to reduce the number of
-// requests to the base node, but if you'd like to stream directly, this can be set to 1.
-const GET_BLOCKS_PAGE_SIZE: usize = 10;
-
 pub(crate) mod base_node_grpc {
     tonic::include_proto!("tari.base_node");
 }
@@ -59,13 +63,15 @@ pub(crate) mod base_node_grpc {
 pub struct BaseNodeGrpcServer {
     executor: runtime::Handle,
     node_service: LocalNodeCommsInterface,
+    node_config: GlobalConfig,
 }
 impl BaseNodeGrpcServer {
-    pub fn new(executor: runtime::Handle, local_node: LocalNodeCommsInterface) -> Self {
+    pub fn new(executor: runtime::Handle, local_node: LocalNodeCommsInterface, node_config: GlobalConfig) -> Self {
         Self {
             executor,
             node_service: local_node,
+            node_config,
         }
     }
 }
@@ -73,8 +79,104 @@ impl BaseNodeGrpcServer {
 #[tonic::async_trait]
 impl base_node_grpc::base_node_server::BaseNode for BaseNodeGrpcServer {
     type GetBlocksStream = mpsc::Receiver<Result<base_node_grpc::HistoricalBlock, Status>>;
+    type GetNetworkDifficultyStream = mpsc::Receiver<Result<NetworkDifficultyResponse, Status>>;
     type ListHeadersStream = mpsc::Receiver<Result<base_node_grpc::BlockHeader, Status>>;
+    async fn get_network_difficulty(
+        &self,
+        request: Request<HeightRequest>,
+    ) -> Result<Response<Self::GetNetworkDifficultyStream>, Status>
+    {
+        let request = request.into_inner();
+        debug!(
+            target: LOG_TARGET,
+            "Incoming GRPC request for GetNetworkDifficulty: from_tip: {:?} start_height: {:?} end_height: {:?}",
+            request.from_tip,
+            request.start_height,
+            request.end_height
+        );
+        let mut handler = self.node_service.clone();
+        let mut heights: Vec<u64> = request.get_heights(handler.clone()).await?;
+        heights = heights
+            .drain(..cmp::min(heights.len(), GET_DIFFICULTY_MAX_HEIGHTS))
+            .collect();
+        let (mut tx, rx) = mpsc::channel(GET_DIFFICULTY_MAX_HEIGHTS);
+
+        self.executor.spawn(async move {
+            let mut page: Vec<u64> = heights
+                .drain(..cmp::min(heights.len(), GET_DIFFICULTY_PAGE_SIZE))
+                .collect();
+            while page.len() > 0 {
+                let difficulties = match handler.get_headers(page.clone()).await {
+                    Err(err) => {
+                        warn!(
+                            target: LOG_TARGET,
+                            "Error communicating with local base node: {:?}", err,
+                        );
+                        return;
+                    },
+                    Ok(data) => {
+                        let mut iter = data.iter().peekable();
+                        let mut result = Vec::new();
+                        while let Some(next) = iter.next() {
+                            let current_difficulty = next.pow.accumulated_blake_difficulty.as_u64();
+                            let current_timestamp = next.timestamp.as_u64();
+                            let current_height = next.height;
+                            let estimated_hash_rate = if let Some(peek) = iter.peek() {
+                                let peeked_timestamp = peek.timestamp.as_u64();
+                                let estimated_hash_rate = current_difficulty / (current_timestamp - peeked_timestamp);
+                                estimated_hash_rate
+                            } else {
+                                0
+                            };
+
+                            result.push((current_height, current_difficulty, estimated_hash_rate))
+                        }
+                        result
+                    },
+                };
+                let result_size = difficulties.len();
+                for difficulty in difficulties {
+                    match tx
+                        .send(Ok({
+                            NetworkDifficultyResponse {
+                                height: difficulty.0,
+                                difficulty: difficulty.1,
+                                estimated_hash_rate: difficulty.2,
+                            }
+                        }))
+                        .await
+                    {
+                        Ok(_) => (),
+                        Err(err) => {
+                            warn!(target: LOG_TARGET, "Error sending difficulty via GRPC: {}", err);
+                            match tx.send(Err(Status::unknown("Error sending data"))).await {
+                                Ok(_) => (),
+                                Err(send_err) => {
+                                    warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err)
+                                },
+                            }
+                            return;
+                        },
+                    }
+                }
+                if result_size < GET_DIFFICULTY_PAGE_SIZE {
+                    break;
+                }
+                page = heights
+                    .drain(..cmp::min(heights.len(), GET_DIFFICULTY_PAGE_SIZE))
+                    .collect();
+            }
+        });
+
+        debug!(
+            target: LOG_TARGET,
+            "Sending GetNetworkDifficulty response stream to client"
+        );
+        Ok(Response::new(rx))
+    }
+
     async fn list_headers(
         &self,
         request: Request<ListHeadersRequest>,
@@ -88,6 +190,7 @@
             request.num_headers,
             request.sorting
         );
+        let mut handler = self.node_service.clone();
         let tip = match handler.get_metadata().await {
             Err(err) => {
@@ -181,6 +284,7 @@
         heights = heights
             .drain(..cmp::min(heights.len(), GET_BLOCKS_MAX_HEIGHTS))
             .collect();
+        let mut handler = self.node_service.clone();
         let (mut tx, rx) = mpsc::channel(GET_BLOCKS_PAGE_SIZE);
         self.executor.spawn(async move {
@@ -224,11 +328,7 @@
         Ok(Response::new(rx))
     }
-    async fn get_calc_timing(
-        &self,
-        request: Request<GetCalcTimingRequest>,
-    ) -> Result<Response<CalcTimingResponse>, Status>
-    {
+    async fn get_calc_timing(&self, request: Request<HeightRequest>) -> Result<Response<CalcTimingResponse>, Status> {
         let request = request.into_inner();
         debug!(
             target: LOG_TARGET,
@@ -239,22 +339,7 @@
         );
         let mut handler = self.node_service.clone();
-        let heights = if request.start_height > 0 && request.end_height > 0 {
-            BlockHeader::get_height_range(request.start_height, request.end_height)
-        } else if request.from_tip > 0 {
-            match BlockHeader::get_heights_from_tip(handler.clone(), request.from_tip).await {
-                Ok(heights) => heights,
-                Err(err) => {
-                    warn!(
-                        target: LOG_TARGET,
-                        "Error getting heights from tip for GRPC client: {}", err
-                    );
-                    Vec::new()
-                },
-            }
-        } else {
-            return Err(Status::invalid_argument("Invalid arguments provided"));
-        };
+        let heights: Vec<u64> = request.get_heights(handler.clone()).await?;
         let headers = match handler.get_headers(heights).await {
             Ok(headers) => headers,
@@ -266,11 +351,118 @@
         let (max, min, avg) = BlockHeader::timing_stats(&headers);
         let response: base_node_grpc::CalcTimingResponse = base_node_grpc::CalcTimingResponse { max, min, avg };
-        debug!(target: LOG_TARGET, "Sending GetCalcTiming response stream to client");
+        debug!(target: LOG_TARGET, "Sending GetCalcTiming response to client");
         Ok(Response::new(response))
     }
+
+    async fn get_constants(
+        &self,
+        _request: Request<Empty>,
+    ) -> Result<Response<base_node_grpc::ConsensusConstants>, Status>
+    {
+        debug!(target: LOG_TARGET, "Incoming GRPC request for GetConstants",);
+        let network: Network = self.node_config.network.into();
+        debug!(target: LOG_TARGET, "Sending GetConstants response to client");
+        Ok(Response::new(network.create_consensus_constants().into()))
+    }
+
+    async fn get_block_size(
+        &self,
+        request: Request<BlockGroupRequest>,
+    ) -> Result<Response<BlockGroupResponse>, Status>
+    {
+        get_block_group(self.node_service.clone(), request, BlockGroupType::BlockSize).await
+    }
+
+    async fn get_block_fees(
+        &self,
+        request: Request<BlockGroupRequest>,
+    ) -> Result<Response<BlockGroupResponse>, Status>
+    {
+        get_block_group(self.node_service.clone(), request, BlockGroupType::BlockFees).await
+    }
+
+    async fn get_version(&self, _request: Request<Empty>) -> Result<Response<StringValue>, Status> {
+        Ok(Response::new(VERSION.to_string().into()))
+    }
+
+    async fn get_tokens_in_circulation(
+        &self,
+        request: Request<IntegerValue>,
+    ) -> Result<Response<IntegerValue>, Status>
+    {
+        debug!(target: LOG_TARGET, "Incoming GRPC request for GetTokensInCirculation",);
+        let request = request.into_inner();
+        let network: Network = self.node_config.network.into();
+        let constants = network.create_consensus_constants();
+        let (initial, decay, tail) = constants.emission_amounts();
+        let schedule = EmissionSchedule::new(initial, decay, tail);
+        let value: u64 = schedule.supply_at_block(request.value).into();
+        debug!(
+            target: LOG_TARGET,
+            "Sending GetTokensInCirculation response {} to client", value
+        );
+        Ok(Response::new(IntegerValue { value }))
+    }
 }
+enum BlockGroupType {
+    BlockFees,
+    BlockSize,
+}
+async fn get_block_group(
+    mut handler: LocalNodeCommsInterface,
+    request: Request<BlockGroupRequest>,
+    block_group_type: BlockGroupType,
+) -> Result<Response<BlockGroupResponse>, Status>
+{
+    let request = request.into_inner();
+    let calc_type_response = request.calc_type;
+    let calc_type: CalcType = request.calc_type();
+    let height_request: HeightRequest = request.into();
+
+    debug!(
+        target: LOG_TARGET,
+        "Incoming GRPC request for GetBlockSize: from_tip: {:?} start_height: {:?} end_height: {:?}",
+        height_request.from_tip,
+        height_request.start_height,
+        height_request.end_height
+    );
+
+    let heights = height_request.get_heights(handler.clone()).await?;
+
+    let blocks = match handler.get_blocks(heights).await {
+        Err(err) => {
+            warn!(
+                target: LOG_TARGET,
+                "Error communicating with local base node: {:?}", err,
+            );
+            vec![]
+        },
+        Ok(data) => data,
+    };
+    let extractor = match block_group_type {
+        BlockGroupType::BlockFees => block_fees,
+        BlockGroupType::BlockSize => block_size,
+    };
+    let values = blocks.iter().map(extractor).collect::<Vec<u64>>();
+    let value = match calc_type {
+        CalcType::Median => median(values).map(|v| vec![v]),
+        CalcType::Mean => mean(values).map(|v| vec![v]),
+        CalcType::Quantile => return Err(Status::unimplemented("Quantile has not been implemented")),
+        CalcType::Quartile => return Err(Status::unimplemented("Quartile has not been implemented")),
+        _ => median(values).map(|v| vec![v]),
+    }
+    .unwrap_or(vec![]);
+    debug!(
+        target: LOG_TARGET,
+        "Sending GetBlockSize response to client: {:?}", value
+    );
+    Ok(Response::new(BlockGroupResponse {
+        value,
+        calc_type: calc_type_response,
+    }))
+}
 /// Utility function that converts a `chrono::DateTime` to a `prost::Timestamp`
 fn datetime_to_timestamp(datetime: EpochTime) -> Timestamp {
     Timestamp {
@@ -279,6 +471,54 @@
     }
 }
+impl From<u64> for base_node_grpc::IntegerValue {
+    fn from(value: u64) -> Self {
+        Self { value }
+    }
+}
+
+impl From<String> for base_node_grpc::StringValue {
+    fn from(value: String) -> Self {
+        Self { value }
+    }
+}
+
+impl base_node_grpc::HeightRequest {
+    pub async fn get_heights(&self, handler: LocalNodeCommsInterface) -> Result<Vec<u64>, Status> {
+        block_heights(handler, self.start_height, self.end_height, self.from_tip).await
+    }
+}
+
+impl From<BlockGroupRequest> for base_node_grpc::HeightRequest {
+    fn from(b: BlockGroupRequest) -> Self {
+        Self {
+            from_tip: b.from_tip,
+            start_height: b.start_height,
+            end_height: b.end_height,
+        }
+    }
+}
+
+impl From<ConsensusConstants> for base_node_grpc::ConsensusConstants {
+    fn from(cc: ConsensusConstants) -> Self {
+        let (emission_initial, emission_decay, emission_tail) = cc.emission_amounts();
+        Self {
+            coinbase_lock_height: cc.coinbase_lock_height(),
+            blockchain_version: cc.blockchain_version().into(),
+            future_time_limit: cc.ftl().as_u64(),
+            target_block_interval: cc.get_target_block_interval(),
+            difficulty_block_window: cc.get_difficulty_block_window(),
+            difficulty_max_block_interval: cc.get_difficulty_max_block_interval(),
+            max_block_transaction_weight: cc.get_max_block_transaction_weight(),
+            pow_algo_count: cc.get_pow_algo_count(),
+            median_timestamp_count: u64::try_from(cc.get_median_timestamp_count()).unwrap_or(0),
+            emission_initial: emission_initial.into(),
+            emission_decay: emission_decay.into(),
+            emission_tail: emission_tail.into(),
+            min_blake_pow_difficulty: cc.min_pow_difficulty(PowAlgorithm::Blake).into(),
+        }
+    }
+}
 impl From<HistoricalBlock> for base_node_grpc::HistoricalBlock {
     fn from(hb: HistoricalBlock) -> Self {
         Self {
diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs
index f976af84c1..3c8e90d4cf 100644
--- a/applications/tari_base_node/src/main.rs
+++ b/applications/tari_base_node/src/main.rs
@@ -202,7 +202,8 @@ fn main_inner() -> Result<(), ExitCodes> {
     cli::print_banner(parser.get_commands(), 3);
     if node_config.grpc_enabled {
-        let grpc = crate::grpc::BaseNodeGrpcServer::new(rt.handle().clone(), ctx.local_node());
+        let grpc =
+            crate::grpc::server::BaseNodeGrpcServer::new(rt.handle().clone(), ctx.local_node(), node_config.clone());
         rt.spawn(run_grpc(grpc, node_config.grpc_address));
     }
@@ -225,11 +226,11 @@ fn main_inner() -> Result<(), ExitCodes> {
 }
 /// Runs the gRPC server
-async fn run_grpc(grpc: crate::grpc::BaseNodeGrpcServer, grpc_address: SocketAddr) -> Result<(), String> {
+async fn run_grpc(grpc: crate::grpc::server::BaseNodeGrpcServer, grpc_address: SocketAddr) -> Result<(), String> {
     info!(target: LOG_TARGET, "Starting GRPC on {}", grpc_address);
     Server::builder()
-        .add_service(crate::grpc::base_node_grpc::base_node_server::BaseNodeServer::new(grpc))
+        .add_service(crate::grpc::server::base_node_grpc::base_node_server::BaseNodeServer::new(grpc))
         .serve(grpc_address)
         .await
         .map_err(|e| format!("GRPC server returned error:{}", e))?;
diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs
index ed41c9b732..499fb72248 100644
--- a/base_layer/core/src/base_node/comms_interface/local_interface.rs
+++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs
@@ -31,6 +31,7 @@ use crate::{
     blocks::{Block, BlockHeader, NewBlockTemplate},
     chain_storage::{ChainMetadata, HistoricalBlock},
     proof_of_work::{Difficulty, PowAlgorithm},
+    transactions::types::HashOutput,
 };
 use futures::{stream::Fuse, StreamExt};
 use std::sync::Arc;
diff --git a/base_layer/core/src/blocks/blockheader.rs b/base_layer/core/src/blocks/blockheader.rs
index 7f2a25bd2a..b138b5a70b 100644
--- a/base_layer/core/src/blocks/blockheader.rs
+++ b/base_layer/core/src/blocks/blockheader.rs
@@ -173,11 +173,13 @@ impl BlockHeader {
     {
         let metadata = handler.get_metadata().await?;
         let tip = metadata.height_of_longest_chain.unwrap_or(0);
-
-        let start = std::cmp::max(tip - height_from_tip, 1);
+        // Avoid overflow
+        let height_from_tip = std::cmp::min(tip, height_from_tip);
+        let start = std::cmp::max(tip - height_from_tip, 0);
         Ok(BlockHeader::get_height_range(start, tip))
     }
+    /// Returns a height range in descending order
     pub fn get_height_range(start: u64, end_inclusive: u64) -> Vec<u64> {
         let mut heights: Vec<u64> =
            (std::cmp::min(start, end_inclusive)..=std::cmp::max(start, end_inclusive)).collect();
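A standalone sketch (not part of the patch) of the underflow case the change above guards against: asking for more blocks from the tip than the chain is long would previously compute `tip - height_from_tip` on a u64, which panics in debug builds and wraps in release. The `max(..., 0)` mirrors the patch even though it is redundant for an unsigned type:

fn clamped_start(tip: u64, height_from_tip: u64) -> u64 {
    // Avoid overflow, as in BlockHeader::get_heights_from_tip above.
    let height_from_tip = std::cmp::min(tip, height_from_tip);
    std::cmp::max(tip - height_from_tip, 0)
}

fn main() {
    assert_eq!(clamped_start(5, 100), 0); // previously: 5 - 100 would underflow
    assert_eq!(clamped_start(100, 5), 95);
}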
diff --git a/base_layer/core/src/consensus/network.rs b/base_layer/core/src/consensus/network.rs
index 9bc840e0b0..396e990e8b 100644
--- a/base_layer/core/src/consensus/network.rs
+++ b/base_layer/core/src/consensus/network.rs
@@ -21,7 +21,7 @@
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 use super::consensus_constants::ConsensusConstants;
-
+use tari_common::configuration::Network as GlobalNetwork;
 /// Specifies the configured chain network.
 #[derive(Copy, Clone)]
 pub enum Network {
@@ -43,3 +43,12 @@ impl Network {
         }
     }
 }
+
+impl From<GlobalNetwork> for Network {
+    fn from(global_network: GlobalNetwork) -> Self {
+        match global_network {
+            GlobalNetwork::MainNet => Network::MainNet,
+            GlobalNetwork::Rincewind => Network::Rincewind,
+        }
+    }
+}
diff --git a/common/src/configuration/global.rs b/common/src/configuration/global.rs
index 9a428c7535..2950dcd381 100644
--- a/common/src/configuration/global.rs
+++ b/common/src/configuration/global.rs
@@ -36,7 +36,7 @@ use std::{
 //------------------------------------- Main Configuration Struct --------------------------------------//
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct GlobalConfig {
     pub network: Network,
     pub comms_transport: CommsTransport,
@@ -350,7 +350,7 @@ fn config_string(network: &str, key: &str) -> String {
 }
 //--------------------------------------------- Network type ------------------------------------------//
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Copy)]
 pub enum Network {
     MainNet,
     Rincewind,
@@ -382,7 +382,7 @@ impl Display for Network {
 }
 //--------------------------------------------- Database type ------------------------------------------//
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub enum DatabaseType {
     LMDB(PathBuf),
     Memory,
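Another hedged client-side sketch, this time for the aggregate endpoints: querying median block fees over the last 100 blocks. The same assumptions as the earlier sketch apply (a tonic-generated `base_node_grpc` module and a placeholder address); prost represents the CalcType enum field as an i32:

use base_node_grpc::{base_node_client::BaseNodeClient, BlockGroupRequest, CalcType};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = BaseNodeClient::connect("http://127.0.0.1:18142").await?;
    let request = BlockGroupRequest {
        from_tip: 100,
        start_height: 0,
        end_height: 0,
        calc_type: CalcType::Median as i32,
    };
    let response = client.get_block_fees(request).await?.into_inner();
    println!("median block fee over the last 100 blocks: {:?}", response.value);
    Ok(())
}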