[KEY MANAGEMENT] Add state pub keys to HotshotConfig (#2656)
* add state pub keys to hotshotconfig

* lint

* refactor config getter for orchestrator

* clean up
dailinsubjam authored Feb 26, 2024
1 parent ca6e235 commit a3544a7
Showing 10 changed files with 202 additions and 57 deletions.
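The central change across these files is replacing bare `StakeTableEntry` values with a `PeerConfig` bundle that also carries each node's state public key, plus a `ValidatorConfig::get_public_config()` helper that builds it. The diff itself only names the `stake_table_entry` field and the helper's call sites, so the following self-contained Rust sketch of the shapes involved is an assumption; placeholder byte vectors stand in for the real key types.

#[derive(Clone, Debug, Default)]
struct StakeTableEntry {
    pub_key: Vec<u8>, // placeholder for the BLS quorum public key
    stake: u64,
}

#[derive(Clone, Debug, Default)]
struct PeerConfig {
    // Named in the diff: consumed by create_election in static_committee.rs.
    stake_table_entry: StakeTableEntry,
    // Assumed field: the state public key this commit adds alongside the stake entry.
    state_ver_key: Vec<u8>,
}

#[derive(Clone, Debug, Default)]
struct ValidatorConfig {
    public_key: Vec<u8>,
    stake_value: u64,
    state_pub_key: Vec<u8>, // assumed field backing the new state key
}

impl ValidatorConfig {
    // Mirrors the get_public_config() calls that replace the old
    // `public_key.get_stake_table_entry(stake_value)` pattern in this diff.
    fn get_public_config(&self) -> PeerConfig {
        PeerConfig {
            stake_table_entry: StakeTableEntry {
                pub_key: self.public_key.clone(),
                stake: self.stake_value,
            },
            state_ver_key: self.state_pub_key.clone(),
        }
    }
}

fn main() {
    // known_nodes_with_stake is now pre-sized with defaults, as in mod.rs below.
    let known_nodes = vec![PeerConfig::default(); 4];
    assert_eq!(known_nodes.len(), 4);
    let peer = ValidatorConfig::default().get_public_config();
    assert_eq!(peer.stake_table_entry.stake, 0);
}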
38 changes: 16 additions & 22 deletions crates/examples/infra/mod.rs
@@ -26,6 +26,7 @@ use hotshot_orchestrator::{
};
use hotshot_types::message::Message;
use hotshot_types::traits::network::ConnectedNetwork;
use hotshot_types::PeerConfig;
use hotshot_types::ValidatorConfig;
use hotshot_types::{
consensus::ConsensusMetricsValue,
@@ -111,14 +112,9 @@ pub fn load_config_from_file<TYPES: NodeType>(
// but its type is too complex to load so we'll generate it from seed now
config.config.my_own_validator_config =
ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1);
let my_own_validator_config_with_stake = config
.config
.my_own_validator_config
.public_key
.get_stake_table_entry(1u64);
// initialize it with size for better assignment of other peers' config
// initialize it with size for better assignment of peers' config
config.config.known_nodes_with_stake =
vec![my_own_validator_config_with_stake; config.config.total_nodes.get() as usize];
vec![PeerConfig::default(); config.config.total_nodes.get() as usize];

config
}
@@ -768,27 +764,25 @@ pub async fn main_entry_point<
let orchestrator_client: OrchestratorClient =
OrchestratorClient::new(args.clone(), public_ip.to_string());

// We assume a node will not call this twice and generate two validator configs with the same identity.
let my_own_validator_config = NetworkConfig::<TYPES::SignatureKey, TYPES::ElectionConfigType>::generate_init_validator_config(
&orchestrator_client,
).await;

// conditionally save/load config from file or orchestrator
let (mut run_config, source) =
NetworkConfig::<TYPES::SignatureKey, TYPES::ElectionConfigType>::from_file_or_orchestrator(
// This function returns the correct complete config from the orchestrator.
// It takes a valid args.network_config_file when loading from a file, or a valid validator_config when loading from the orchestrator; whichever one is not used is ignored.
// The returned config also includes the peers' public keys and public configs.
// The sequencer will call this function right after OrchestratorClient::new,
// so the earlier `generate_validator_config_when_init` is not used by the sequencer; it exists only to generate key pairs for testing in hotshot.
let (run_config, source) =
NetworkConfig::<TYPES::SignatureKey, TYPES::ElectionConfigType>::get_complete_config(
&orchestrator_client,
my_own_validator_config,
args.clone().network_config_file,
)
.await;

let node_index = run_config.node_index;
error!("Retrieved config; our node index is {node_index}");

// one more round of orchestrator here to get peer's public key/config
let updated_config: NetworkConfig<TYPES::SignatureKey, TYPES::ElectionConfigType> =
orchestrator_client
.post_and_wait_all_public_keys::<TYPES::SignatureKey, TYPES::ElectionConfigType>(
run_config.node_index,
run_config.config.my_own_validator_config.public_key.clone(),
)
.await;
run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake;

error!("Initializing networking");
let run = RUNDA::initialize_networking(run_config.clone()).await;
let hotshot = run.initialize_state_and_hotshot().await;
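Because the listing above interleaves removed and added lines, here is a consolidated sketch of the new startup sequence in `main_entry_point`, using only names that appear in this diff; `K` and `E` abbreviate `TYPES::SignatureKey` and `TYPES::ElectionConfigType`, and error handling is elided.

let orchestrator_client = OrchestratorClient::new(args.clone(), public_ip.to_string());

// 1. Ask the orchestrator for a throwaway index and derive a key pair from it.
let my_own_validator_config =
    NetworkConfig::<K, E>::generate_init_validator_config(&orchestrator_client).await;

// 2. Fetch the run config (from file or orchestrator), post our PeerConfig,
//    and block until every peer's public config has been collected.
let (run_config, _source) = NetworkConfig::<K, E>::get_complete_config(
    &orchestrator_client,
    my_own_validator_config,
    args.clone().network_config_file,
)
.await;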
11 changes: 8 additions & 3 deletions crates/hotshot/src/traits/election/static_committee.rs
@@ -5,6 +5,7 @@ use hotshot_types::traits::{
node_implementation::NodeType,
signature_key::SignatureKey,
};
use hotshot_types::PeerConfig;
#[allow(deprecated)]
use serde::{Deserialize, Serialize};
use std::{marker::PhantomData, num::NonZeroU64};
@@ -99,14 +100,18 @@ where
}

fn create_election(
keys_qc: Vec<PUBKEY::StakeTableEntry>,
entries: Vec<PeerConfig<PUBKEY>>,
config: TYPES::ElectionConfigType,
) -> Self {
let mut committee_nodes_with_stake = keys_qc.clone();
let nodes_with_stake: Vec<PUBKEY::StakeTableEntry> = entries
.iter()
.map(|x| x.stake_table_entry.clone())
.collect();
let mut committee_nodes_with_stake: Vec<PUBKEY::StakeTableEntry> = nodes_with_stake.clone();
debug!("Election Membership Size: {}", config.num_nodes);
committee_nodes_with_stake.truncate(config.num_nodes.try_into().unwrap());
Self {
nodes_with_stake: keys_qc,
nodes_with_stake,
committee_nodes_with_stake,
_type_phantom: PhantomData,
}
8 changes: 8 additions & 0 deletions crates/orchestrator/api.toml
@@ -23,6 +23,14 @@ This must be a POST request so we can update the OrchestratorState in the server
received from the 'identity' endpoint
"""

# POST to get the latest temporary node index, used only for generating a validator's key pair
[route.tmp_node_index]
PATH = ["tmp_node_index"]
METHOD = "POST"
DOC = """
Get the latest temporary node index, used only for generating a validator's key pair for testing in hotshot; the generated key pair may later be bound to a different node_index.
"""

# POST the node's node index to generate public key for pubkey collection
[route.postpubkey]
PATH = ["pubkey/:node_index"]
30 changes: 21 additions & 9 deletions crates/orchestrator/src/client.rs
@@ -7,11 +7,10 @@ use futures::{Future, FutureExt};

use hotshot_types::{
traits::{election::ElectionConfig, signature_key::SignatureKey},
ValidatorConfig,
PeerConfig,
};
use surf_disco::{error::ClientError, Client};
use tide_disco::Url;

/// Holds the client connection to the orchestrator
pub struct OrchestratorClient {
/// the client
@@ -102,13 +101,13 @@ impl OrchestratorClient {
}

/// Sends an identify message to the orchestrator and attempts to get its config
/// Returns both the `node_index` and the run configuration from the orchestrator
/// Returns both the `node_index` and the run configuration, without the peers' public configs, from the orchestrator
/// Will block until both are returned
/// # Panics
/// if unable to convert the node index from usize into u64
/// (only applicable on 32 bit systems)
#[allow(clippy::type_complexity)]
pub async fn get_config<K: SignatureKey, E: ElectionConfig>(
pub async fn get_config_without_peer<K: SignatureKey, E: ElectionConfig>(
&self,
identity: String,
) -> NetworkConfig<K, E> {
@@ -140,13 +139,26 @@

let mut config = self.wait_for_fn_from_orchestrator(f).await;
config.node_index = From::<u16>::from(node_index);
// The orchestrator will generate keys for validator if it doesn't load keys from file
config.config.my_own_validator_config =
ValidatorConfig::<K>::generated_from_seed_indexed(config.seed, config.node_index, 1);

config
}

/// Post to the orchestrator and get the latest `node_index`
/// Then return it for the init validator config
/// # Panics
/// if unable to post
pub async fn get_node_index_for_init_validator_config(&self) -> u16 {
let cur_node_index = |client: Client<ClientError>| {
async move {
let cur_node_index: Result<u16, ClientError> =
client.post("api/tmp_node_index").send().await;
cur_node_index
}
.boxed()
};
self.wait_for_fn_from_orchestrator(cur_node_index).await
}

/// Sends my public key to the orchestrator so that it can collect all public keys,
/// and gets the updated config.
/// Blocks until the orchestrator has collected all peers' public keys/configs.
@@ -155,13 +167,13 @@
pub async fn post_and_wait_all_public_keys<K: SignatureKey, E: ElectionConfig>(
&self,
node_index: u64,
my_pub_key: K,
my_pub_key: PeerConfig<K>,
) -> NetworkConfig<K, E> {
// send my public key
let _send_pubkey_ready_f: Result<(), ClientError> = self
.client
.post(&format!("api/pubkey/{node_index}"))
.body_binary(&my_pub_key.to_bytes())
.body_binary(&PeerConfig::<K>::to_bytes(&my_pub_key)) //&my_pub_key.stake_table_entry.get_public_key().to_bytes()
.unwrap()
.send()
.await;
63 changes: 54 additions & 9 deletions crates/orchestrator/src/config.rs
@@ -1,6 +1,6 @@
use hotshot_types::{
traits::{election::ElectionConfig, signature_key::SignatureKey},
ExecutionType, HotShotConfig, ValidatorConfig,
ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig,
};
use serde_inline_default::serde_inline_default;
use std::{
@@ -208,27 +208,75 @@ impl<K: SignatureKey, E: ElectionConfig> NetworkConfig<K, E> {
// fallback to orchestrator
error!("{e}, falling back to orchestrator");

let config = client.get_config(client.identity.clone()).await;
let config = client
.get_config_without_peer(client.identity.clone())
.await;

// save to file if we fell back
if let Err(e) = config.to_file(file) {
error!("{e}");
};

(config, NetworkConfigSource::Orchestrator)
(config, NetworkConfigSource::File)
}
}
} else {
error!("Retrieving config from the orchestrator");

// otherwise just get from orchestrator
(
client.get_config(client.identity.clone()).await,
client
.get_config_without_peer(client.identity.clone())
.await,
NetworkConfigSource::Orchestrator,
)
}
}

/// Get a temporary node index for generating a validator config
pub async fn generate_init_validator_config(client: &OrchestratorClient) -> ValidatorConfig<K> {
// This cur_node_index is only used for key pair generation; it is not bound to the node.
// Later, the node with the generated key pair will get a new node_index from the orchestrator.
let cur_node_index = client.get_node_index_for_init_validator_config().await;
ValidatorConfig::generated_from_seed_indexed([0u8; 32], cur_node_index.into(), 1)
}

/// Asynchronously retrieves a `NetworkConfig` from an orchestrator.
/// The retrieved one includes correct `node_index` and peer's public config.
pub async fn get_complete_config(
client: &OrchestratorClient,
my_own_validator_config: ValidatorConfig<K>,
file: Option<String>,
) -> (NetworkConfig<K, E>, NetworkConfigSource) {
let (mut run_config, source) = Self::from_file_or_orchestrator(client, file).await;
let node_index = run_config.node_index;

// Assign my_own_validator_config to the run_config if not loading from file
match source {
NetworkConfigSource::Orchestrator => {
run_config.config.my_own_validator_config = my_own_validator_config;
}
NetworkConfigSource::File => {
// do nothing, my_own_validator_config has already been loaded from file
}
}

// one more round of orchestrator here to get peer's public key/config
let updated_config: NetworkConfig<K, E> = client
.post_and_wait_all_public_keys::<K, E>(
run_config.node_index,
run_config
.config
.my_own_validator_config
.get_public_config(),
)
.await;
run_config.config.known_nodes_with_stake = updated_config.config.known_nodes_with_stake;

error!("Retrieved config; our node index is {node_index}");
(run_config, source)
}

/// Loads a `NetworkConfig` from a file.
///
/// This function takes a file path as a string, reads the file, and then deserializes the contents into a `NetworkConfig`.
@@ -436,7 +484,7 @@ pub struct HotShotConfigFile<KEY: SignatureKey> {
pub my_own_validator_config: ValidatorConfig<KEY>,
#[serde(skip)]
/// The known nodes' public key and stake value
pub known_nodes_with_stake: Vec<KEY::StakeTableEntry>,
pub known_nodes_with_stake: Vec<PeerConfig<KEY>>,
/// Number of committee nodes
pub committee_nodes: usize,
/// Maximum transactions per block
@@ -561,10 +609,7 @@ impl<KEY: SignatureKey> Default for HotShotConfigFile<KEY> {
.map(|node_id| {
let cur_validator_config: ValidatorConfig<KEY> =
ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1);

cur_validator_config
.public_key
.get_stake_table_entry(cur_validator_config.stake_value)
cur_validator_config.get_public_config()
})
.collect();
Self {
36 changes: 31 additions & 5 deletions crates/orchestrator/src/lib.rs
@@ -6,7 +6,10 @@ pub mod client;
pub mod config;

use async_lock::RwLock;
use hotshot_types::traits::{election::ElectionConfig, signature_key::SignatureKey};
use hotshot_types::{
traits::{election::ElectionConfig, signature_key::SignatureKey},
PeerConfig,
};
use std::{
collections::HashSet,
io,
@@ -48,6 +51,8 @@ pub fn libp2p_generate_indexed_identity(seed: [u8; 32], index: u64) -> Keypair {
struct OrchestratorState<KEY: SignatureKey, ELECTION: ElectionConfig> {
/// Tracks the latest node index we have generated a configuration for
latest_index: u16,
/// Tracks the latest temporary index we have generated for init validator's key pair
tmp_latest_index: u16,
/// The network configuration
config: NetworkConfig<KEY, ELECTION>,
/// The total nodes that have posted their public keys
@@ -70,6 +75,7 @@ impl<KEY: SignatureKey + 'static, ELECTION: ElectionConfig + 'static>
pub fn new(network_config: NetworkConfig<KEY, ELECTION>) -> Self {
OrchestratorState {
latest_index: 0,
tmp_latest_index: 0,
config: network_config,
nodes_with_pubkey: 0,
peer_pub_ready: false,
@@ -93,6 +99,10 @@ pub trait OrchestratorApi<KEY: SignatureKey, ELECTION: ElectionConfig> {
&mut self,
_node_index: u16,
) -> Result<NetworkConfig<KEY, ELECTION>, ServerError>;
/// get endpoint for the next available temporary node index
/// # Errors
/// if unable to serve
fn get_tmp_node_index(&mut self) -> Result<u16, ServerError>;
/// post endpoint for each node's public key
/// # Errors
/// if unable to serve
@@ -181,6 +191,21 @@
Ok(self.config.clone())
}

// Assumes a node will not request a temporary index twice
fn get_tmp_node_index(&mut self) -> Result<u16, ServerError> {
let tmp_node_index = self.tmp_latest_index;
self.tmp_latest_index += 1;

if usize::from(tmp_node_index) >= self.config.config.total_nodes.get() {
return Err(ServerError {
status: tide_disco::StatusCode::BadRequest,
message: "Node index getter for key pair generation has reached capacity"
.to_string(),
});
}
Ok(tmp_node_index)
}

#[allow(clippy::cast_possible_truncation)]
fn register_public_key(
&mut self,
@@ -195,11 +220,9 @@
}
self.pub_posted.insert(node_index);

// Sishan NOTE: let me know if there's a better way to remove the first extra 8 bytes
// The guess is extra bytes are from orchestrator serialization
// The guess is that the extra 8 starting bytes come from orchestrator serialization
pubkey.drain(..8);
let register_pub_key = <KEY as SignatureKey>::from_bytes(pubkey).unwrap();
let register_pub_key_with_stake = register_pub_key.get_stake_table_entry(1u64);
let register_pub_key_with_stake = PeerConfig::<KEY>::from_bytes(pubkey).unwrap();
self.config.config.known_nodes_with_stake[node_index as usize] =
register_pub_key_with_stake;
self.nodes_with_pubkey += 1;
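A plausible explanation for those 8 bytes, assuming the binary bodies on this endpoint are bincode-encoded (bincode 1.x): bincode's default fixed-int encoding prefixes a Vec<u8> with its length as a little-endian u64, which is exactly 8 leading bytes. A self-contained sketch of that assumption:

fn main() {
    let key_bytes: Vec<u8> = vec![0xAB; 4];
    // bincode::serialize writes an 8-byte little-endian length prefix
    // before the Vec's contents.
    let wire = bincode::serialize(&key_bytes).unwrap();
    assert_eq!(wire.len(), 8 + key_bytes.len());
    assert_eq!(wire[..8], (key_bytes.len() as u64).to_le_bytes());
    // Dropping the prefix recovers the raw key bytes, mirroring
    // `pubkey.drain(..8)` in register_public_key above.
    assert_eq!(wire[8..], key_bytes[..]);
}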
@@ -295,6 +318,9 @@
}
.boxed()
})?
.post("tmp_node_index", |_req, state| {
async move { state.get_tmp_node_index() }.boxed()
})?
.post("postpubkey", |req, state| {
async move {
let node_index = req.integer_param("node_index")?;
7 changes: 2 additions & 5 deletions crates/testing/src/test_builder.rs
@@ -1,4 +1,4 @@
use hotshot::{traits::NetworkReliability, types::SignatureKey};
use hotshot::traits::NetworkReliability;
use hotshot_orchestrator::config::ValidatorConfigFile;
use hotshot_types::traits::election::Membership;
use std::{num::NonZeroUsize, sync::Arc, time::Duration};
@@ -233,10 +233,7 @@ impl TestMetadata {
.map(|node_id_| {
let cur_validator_config: ValidatorConfig<TYPES::SignatureKey> =
ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id_ as u64, 1);

cur_validator_config
.public_key
.get_stake_table_entry(cur_validator_config.stake_value)
cur_validator_config.get_public_config()
})
.collect();
// But now, to test the validator's config, we input the info of my_own_validator from the config file when node_id == 0.