Merge branch 'main' into vmarkushin/packets-query
# Conflicts:
#	hyperspace/cosmos/src/client.rs
#	light-clients/ics10-grandpa-cw/src/contract.rs
#	light-clients/ics10-grandpa-cw/src/msg.rs
vmarkushin committed Aug 8, 2024
2 parents b0a9957 + cb1d5f0 commit 9d39c9b
Showing 12 changed files with 364 additions and 194 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/checks.yml
@@ -3,12 +3,12 @@ name: Cargo Check
on:
pull_request:
branches:
- 'master'
- 'main'
- 'develop'
- 'release*'
push:
branches:
- 'master'
- 'main'
- 'develop'
- 'release*'

2 changes: 1 addition & 1 deletion .github/workflows/hyperspace-docker-image.yml
@@ -3,7 +3,7 @@ name: "Build and publish Hyperspace Docker image"
on:
push:
branches:
- 'master'
- 'main'
- 'develop'
- 'release*'
tags:
4 changes: 2 additions & 2 deletions .github/workflows/lint.yml
@@ -3,12 +3,12 @@ name: Lint
on:
pull_request:
branches:
- 'master'
- 'main'
- 'develop'
- 'release*'
push:
branches:
- 'master'
- 'main'
- 'develop'
- 'release*'

4 changes: 2 additions & 2 deletions .github/workflows/parachain-node-docker-image.yml
@@ -1,4 +1,4 @@
# This workflow pushes new parachain-node docker images on every new push on master.
# This workflow pushes new parachain-node docker images on every new push on main.
#
# All the images above have support for linux/amd64 and linux/arm64.
#
@@ -9,7 +9,7 @@ name: "Build and publish Parachain-node Docker image"
on:
push:
branches:
- master
- main

jobs:
build-and-publish:
17 changes: 17 additions & 0 deletions .github/workflows/release.yml
@@ -0,0 +1,17 @@
name: Release

on:
push:
tags:
- "v*.*.*"

jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release
uses: softprops/action-gh-release@v2
with:
generate_release_notes: true
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -3,7 +3,7 @@ name: Test
on:
push:
branches:
- 'master'
- 'main'
- 'release*'

env:
14 changes: 8 additions & 6 deletions hyperspace/cosmos/src/client.rs
@@ -253,15 +253,15 @@ where
pub async fn new(config: CosmosClientConfig) -> Result<Self, Error> {
let (rpc_client, rpc_driver) = WebSocketClient::new(config.websocket_url.clone())
.await
.map_err(|e| Error::RpcError(format!("failed to connect to WS: {:?}", e)))?;
.map_err(|e| Error::RpcError(format!("failed to connect to Websocket {:?}", e)))?;
let rpc_http_client = HttpClient::new(config.rpc_url.clone())
.map_err(|e| Error::RpcError(format!("failed to connect to RPC: {:?}", e)))?;
.map_err(|e| Error::RpcError(format!("failed to connect to RPC {:?}", e)))?;
let ws_driver_jh = tokio::spawn(rpc_driver.run());
let grpc_client = tonic::transport::Endpoint::new(config.grpc_url.to_string())
.map_err(|e| Error::RpcError(format!("failed to connect to GRPC: {:?}", e)))?
.map_err(|e| Error::RpcError(format!("failed to create a GRPC endpoint {:?}", e)))?
.connect()
.await
.map_err(|e| Error::RpcError(format!("failed to connect to GRPC: {:?}", e)))?;
.map_err(|e| Error::RpcError(format!("failed to connect to GRPC {:?}", e)))?;

let chain_id = ChainId::from(config.chain_id);
let light_client =
@@ -401,15 +401,17 @@ where
to: TmHeight,
trusted_height: Height,
) -> Result<Vec<(Header, UpdateType)>, Error> {
let from = from.increment();
let mut xs = Vec::new();
let heightss = (from.value()..=to.value()).collect::<Vec<_>>();
let client = Arc::new(self.clone());
let to = self.rpc_call_delay().as_millis();
let delay_to = self.rpc_call_delay().as_millis();
for heights in heightss.chunks(5) {
let mut join_set = JoinSet::<Result<Result<_, Error>, Elapsed>>::new();
for height in heights.to_owned() {
let client = client.clone();
let duration = Duration::from_millis(rand::thread_rng().gen_range(0..to) as u64);
let duration =
Duration::from_millis(rand::thread_rng().gen_range(0..delay_to) as u64);
let fut = async move {
log::trace!(target: "hyperspace_cosmos", "Fetching header at height {:?}", height);
let latest_light_block =
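The client.rs hunk above renames the RPC-delay bound to `delay_to` so it no longer shadows the `to` height, then fetches headers in chunks of five concurrent tasks, each started after a random delay below that bound. Below is a minimal standalone sketch of the same jittered, chunked fetch pattern, assuming tokio and rand; `fetch_header` is a hypothetical stand-in for the real light-client query, and `max_delay_ms` is assumed to be non-zero.

use rand::Rng;
use std::time::Duration;
use tokio::task::JoinSet;

// Hypothetical stand-in for the real light-client header query.
async fn fetch_header(height: u64) -> u64 {
    height
}

// Fetch `heights` in chunks of five concurrent tasks, each delayed by a random
// amount below `max_delay_ms` so the RPC endpoint is not hit all at once.
// Assumes `max_delay_ms > 0`, matching the delay bound used in the hunk above.
async fn fetch_headers_with_jitter(heights: Vec<u64>, max_delay_ms: u64) -> Vec<u64> {
    let mut headers = Vec::new();
    for chunk in heights.chunks(5) {
        let mut join_set = JoinSet::new();
        for height in chunk.to_owned() {
            let delay = Duration::from_millis(rand::thread_rng().gen_range(0..max_delay_ms));
            join_set.spawn(async move {
                tokio::time::sleep(delay).await;
                fetch_header(height).await
            });
        }
        while let Some(joined) = join_set.join_next().await {
            headers.push(joined.expect("header task panicked"));
        }
    }
    headers.sort();
    headers
}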
116 changes: 68 additions & 48 deletions hyperspace/cosmos/src/provider.rs
@@ -23,12 +23,13 @@ use ibc::{
identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId},
path::{
AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath,
CommitmentsPath, ConnectionsPath, Path, ReceiptsPath, SeqRecvsPath,
CommitmentsPath, ConnectionsPath, Path, ReceiptsPath, SeqRecvsPath, SeqSendsPath,
},
},
},
events::IbcEvent,
protobuf::Protobuf,
signer::Signer,
timestamp::Timestamp,
tx_msg::Msg,
Height,
@@ -82,7 +83,9 @@ use tendermint_rpc::{
};
use tokio::{task::JoinSet, time::sleep};

pub const NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER: u64 = 250;
// At least one *mandatory* update should happen during that period
// TODO: make it configurable
pub const NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER: u64 = 500;

#[derive(Clone, Debug)]
pub enum FinalityEvent {
@@ -141,7 +144,6 @@
let update_headers =
self.msg_update_client_header(from, to, client_state.latest_height).await?;
let mut block_events = Vec::new();
block_events.push((0, Vec::new()));
let mut join_set: JoinSet<Result<_, anyhow::Error>> = JoinSet::new();
let range = (from.value()..to.value()).collect::<Vec<_>>();
let to = self.rpc_call_delay().as_millis();
@@ -177,9 +179,15 @@
block_events.sort_by_key(|(height, _)| *height);

let mut updates = Vec::new();
for (events, (update_header, update_type)) in
block_events.into_iter().map(|(_, events)| events).zip(update_headers)
for (i, (events, (update_header, mut update_type))) in block_events
.into_iter()
.map(|(_, events)| events)
.zip(update_headers)
.enumerate()
{
if i == NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER as usize - 1 {
update_type = UpdateType::Mandatory;
}
let height = update_header.height();
let update_client_header = {
let msg = MsgUpdateAnyClient::<LocalClientTypes> {
@@ -231,17 +239,17 @@
let Event { data, events: _, query } = event.unwrap();
match data {
EventData::NewBlock { block, .. }
if query == Query::from(EventType::NewBlock).to_string() =>
{
let height = Height::new(
ChainId::chain_version(chain_id.to_string().as_str()),
u64::from(block.as_ref().ok_or("tx.height").unwrap().header.height),
);
events_with_height.push(IbcEventWithHeight::new(
ClientEvents::NewBlock::new(height).into(),
height,
));
},
if query == Query::from(EventType::NewBlock).to_string() =>
{
let height = Height::new(
ChainId::chain_version(chain_id.to_string().as_str()),
u64::from(block.as_ref().ok_or("tx.height").unwrap().header.height),
);
events_with_height.push(IbcEventWithHeight::new(
ClientEvents::NewBlock::new(height).into(),
height,
));
},
EventData::Tx { tx_result } => {
let height = Height::new(
ChainId::chain_version(chain_id.to_string().as_str()),
@@ -265,10 +273,10 @@
events_with_height
.push(IbcEventWithHeight::new(ibc_event, height));
} else {
log::debug!(target: "hyperspace_cosmos", "The event is unknown");
log::debug!(target: "hyperspace_cosmos", "the event is unknown");
}
} else {
log::debug!(target: "hyperspace_cosmos", "Failed to parse event {:?}", abci_event);
log::debug!(target: "hyperspace_cosmos", "Event wasn't parsed {:?}", abci_event);
}
}
},
@@ -313,6 +321,9 @@ where
Path::ClientState(ClientStatePath(client_id.clone())).to_string().into_bytes();
let (q, proof) = self.query_path(path_bytes.clone(), at, true).await?;
let client_state = Any::decode(&*q.value)?;
if client_state.type_url.is_empty() || client_state.value.is_empty() {
return Err(Error::Custom(format!("empty client state for height {at}")))
}
Ok(QueryClientStateResponse {
client_state: Some(client_state),
proof,
@@ -513,7 +524,6 @@ where

let commitment_sequences: Vec<u64> =
response.commitments.into_iter().map(|v| v.sequence).collect();

Ok(commitment_sequences)
}

@@ -807,52 +817,62 @@ where

fn expected_block_time(&self) -> Duration {
// cosmos chain block time is roughly 6-7 seconds
Duration::from_secs(7)
Duration::from_secs(5)
}

async fn query_client_update_time_and_height(
&self,
client_id: ClientId,
client_height: Height,
) -> Result<(Height, Timestamp), Self::Error> {
log::trace!(
log::debug!(
target: "hyperspace_cosmos",
"Querying client update time and height for client {:?} at height {:?}",
client_id,
client_height
);
let query_str = Query::eq("update_client.client_id", client_id.to_string())
let query_update = Query::eq("update_client.client_id", client_id.to_string())
.and_eq("update_client.consensus_height", client_height.to_string());
let query_create = Query::eq("create_client.client_id", client_id.to_string())
.and_eq("create_client.consensus_height", client_height.to_string());
for query_str in [query_update, query_create] {
let response = self
.rpc_http_client
.tx_search(
query_str,
true,
1,
1, // get only the first Tx matching the query
Order::Ascending,
)
.await
.map_err(|e| Error::RpcError(format!("{e:?}")))?;

let response = self
.rpc_http_client
.tx_search(
query_str,
true,
1,
1, // get only the first Tx matching the query
Order::Ascending,
)
.await
.map_err(|e| Error::RpcError(format!("{e:?}")))?;

for tx in response.txs {
for ev in &tx.tx_result.events {
let height = tx.height.value();
let ev =
ibc_event_try_from_abci_event(ev, Height::new(self.id().version(), height));
let timestamp = self.query_timestamp_at(height).await?;
match ev {
Ok(IbcEvent::UpdateClient(e)) if e.client_id() == &client_id =>
return Ok((
Height::new(self.chain_id.version(), height),
Timestamp::from_nanoseconds(timestamp)?,
)),
_ => (),
for tx in response.txs {
for ev in &tx.tx_result.events {
let height = tx.height.value();
let ev =
ibc_event_try_from_abci_event(ev, Height::new(self.id().version(), height));
let timestamp = self
.query_timestamp_at(height)
.await
.map_err(|e| Error::RpcError(format!("{e:?}")))?;
match ev {
Ok(IbcEvent::UpdateClient(e)) if e.client_id() == &client_id =>
return Ok((
Height::new(self.chain_id.version(), height),
Timestamp::from_nanoseconds(timestamp)?,
)),
Ok(IbcEvent::CreateClient(e)) if e.client_id() == &client_id =>
return Ok((
Height::new(self.chain_id.version(), height),
Timestamp::from_nanoseconds(timestamp)?,
)),
_ => (),
}
}
}
}

Err(Error::from("not found".to_string()))
}

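The provider.rs changes above raise NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER to 500 and, per the new comment, guarantee at least one mandatory update per iteration by promoting the last header of a full batch to UpdateType::Mandatory. Below is a hedged sketch of that promotion step, with a simplified UpdateType enum standing in for the crate's own type.

// Simplified stand-in for the crate's UpdateType; only the two variants needed here.
#[derive(Debug, Clone, Copy, PartialEq)]
enum UpdateType {
    Mandatory,
    Optional,
}

const NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER: u64 = 500;

// Promote the final update of a full batch to Mandatory so that, even when every
// header in the batch is optional, one mandatory client update is still submitted
// per NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER blocks.
fn promote_last_update(updates: &mut [UpdateType]) {
    let last = NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER as usize - 1;
    if let Some(update_type) = updates.get_mut(last) {
        *update_type = UpdateType::Mandatory;
    }
}

fn main() {
    let mut updates = vec![UpdateType::Optional; NUMBER_OF_BLOCKS_TO_PROCESS_PER_ITER as usize];
    promote_last_update(&mut updates);
    assert_eq!(updates.last(), Some(&UpdateType::Mandatory));
}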
3 changes: 3 additions & 0 deletions hyperspace/metrics/src/handler.rs
@@ -159,6 +159,9 @@ impl MetricsHandler {
_ => (),
}
}
if new_latest_processed_height == 0 {
return Ok(())
}
self.metrics.update_latest_processed_height(new_latest_processed_height)?;
Ok(())
}
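The handler.rs hunk adds an early return so that a batch which produced no events can never reset the latest-processed-height metric to zero. A tiny sketch of the same guard, with a plain integer standing in for the real metrics gauge:

// Skip the update entirely when no height was observed in this batch, so an empty
// event set never resets the gauge to zero. `gauge` stands in for the real metric.
fn update_latest_processed_height(gauge: &mut u64, new_latest_processed_height: u64) {
    if new_latest_processed_height == 0 {
        return;
    }
    *gauge = new_latest_processed_height;
}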
28 changes: 8 additions & 20 deletions light-clients/ics10-grandpa-cw/src/contract.rs
@@ -18,9 +18,10 @@ use crate::{
error::ContractError,
log,
msg::{
CheckForMisbehaviourMsg, ContractResult, ExportMetadataMsg, QueryMsg, QueryResponse,
StatusMsg, SudoMsg, UpdateStateMsg, UpdateStateOnMisbehaviourMsg, VerifyClientMessage,
VerifyMembershipMsg, VerifyNonMembershipMsg, VerifyUpgradeAndUpdateStateMsg,
CheckForMisbehaviourMsg, CheckSubstituteAndUpdateStateMsg, ContractResult, ExecuteMsg,
ExportMetadataMsg, InstantiateMsg, QueryMsg, QueryResponse, StatusMsg, UpdateStateMsg,
UpdateStateOnMisbehaviourMsg, VerifyClientMessage, VerifyMembershipMsg, MigrateMsg,
VerifyNonMembershipMsg, VerifyUpgradeAndUpdateStateMsg
},
state::{get_client_state, get_consensus_state},
Bytes,
@@ -117,23 +118,10 @@ impl grandpa_light_client_primitives::HostFunctions for HostFunctions {
}
}

fn process_instantiate_msg(
msg: InstantiateMessage,
ctx: &mut Context<HostFunctions>,
client_id: ClientId,
) -> Result<Binary, ContractError> {
let any = Any::decode(&mut msg.client_state.as_slice())?;
let client_state = ClientState::decode_vec(&any.value)?;
let any = Any::decode(&mut msg.consensus_state.as_slice())?;
let consensus_state = ConsensusState::decode_vec(&any.value)?;

let height = client_state.latest_height();
ctx.checksum = Some(msg.checksum);
ctx.store_client_state(client_id.clone(), client_state)
.map_err(|e| ContractError::Grandpa(e.to_string()))?;
ctx.store_consensus_state(client_id, height, consensus_state)
.map_err(|e| ContractError::Grandpa(e.to_string()))?;
Ok(to_binary(&ContractResult::success())?)
#[entry_point]
pub fn migrate(_deps: DepsMut, _env: Env, _msg: MigrateMsg) -> Result<Response, ContractError> {
// No state migrations performed, just returned a Response
Ok(Response::default())
}

#[cfg_attr(not(feature = "library"), entry_point)]
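The contract.rs hunk above removes the old process_instantiate_msg helper and adds a no-op migrate entry point, paired with the empty MigrateMsg added to msg.rs below. The following is a hedged unit-test sketch, not part of the commit, showing how that entry point could be exercised with cosmwasm-std 1.x mock helpers; mock_dependencies and mock_env are assumed to be available in this crate's cosmwasm-std version.

#[cfg(test)]
mod migrate_tests {
    use super::*;
    use cosmwasm_std::testing::{mock_dependencies, mock_env};

    #[test]
    fn migrate_is_a_no_op() {
        let mut deps = mock_dependencies();
        // The empty MigrateMsg carries no data; the entry point just returns Response::default().
        let res = migrate(deps.as_mut(), mock_env(), MigrateMsg {}).unwrap();
        assert!(res.messages.is_empty());
        assert!(res.attributes.is_empty());
    }
}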
3 changes: 3 additions & 0 deletions light-clients/ics10-grandpa-cw/src/msg.rs
@@ -127,6 +127,9 @@ pub struct ClientStateCallResponse {
pub result: ContractResult,
}

#[cw_serde]
pub struct MigrateMsg {}

#[cw_serde]
pub struct InitializeState {
pub client_state: WasmClientState<FakeInner, FakeInner, FakeInner>,