This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

beta backports (#10576)
* Reject crazy timestamps instead of truncating.

* fix(light cull): poll light cull instead of timer (#10559)

* fix(light cull): poll light cull instead of timer

* fix(grumbles): remove error + updated docs

* fix(on-demand request): `expect()` reason

* docs(remove misleading info)
soc1c authored Apr 8, 2019
1 parent 3c85f29 commit b52ac20
Showing 6 changed files with 65 additions and 126 deletions.
14 changes: 12 additions & 2 deletions ethcore/types/src/header.rs
@@ -16,7 +16,6 @@

//! Block header.
use std::cmp;
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak};
use heapsize::HeapSizeOf;
use ethereum_types::{H256, U256, Address, Bloom};
@@ -342,7 +341,7 @@ impl Decodable for Header {
number: r.val_at(8)?,
gas_limit: r.val_at(9)?,
gas_used: r.val_at(10)?,
timestamp: cmp::min(r.val_at::<U256>(11)?, u64::max_value().into()).as_u64(),
timestamp: r.val_at(11)?,
extra_data: r.val_at(12)?,
seal: vec![],
hash: keccak(r.as_raw()).into(),
@@ -412,4 +411,15 @@ mod tests {

assert_eq!(header_rlp, encoded_header);
}

#[test]
fn reject_header_with_large_timestamp() {
// The RLP of a block header created with the ethash engine.
// Its encoding contains a timestamp of 295147905179352825856 (2^68), which does not fit in a u64.
let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d891000000000000000000080a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap();

// Decoding should fail on the oversized timestamp.
let header: Result<Header, _> = rlp::decode(&header_rlp);
assert_eq!(header.unwrap_err(), rlp::DecoderError::RlpIsTooBig);
}
}
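
Aside (not part of the diff): a minimal sketch of the behavioural change above, assuming the same `rlp` crate API that the new test exercises (`rlp::decode` returning `Result<_, DecoderError>`). The old code decoded the timestamp as a `U256` and clamped it with `cmp::min(..., u64::max_value().into())`, silently turning an absurd timestamp into `u64::MAX`; decoding directly into `u64` rejects the value instead.

fn main() {
    // RLP for the integer 2^68: the prefix 0x89 (a string of 9 bytes) followed by the
    // 9-byte big-endian payload. The value is too large for a u64.
    let encoded: Vec<u8> = vec![0x89, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];

    // New behaviour: decoding the timestamp straight into u64 fails with `RlpIsTooBig`
    // instead of being truncated.
    let decoded: Result<u64, rlp::DecoderError> = rlp::decode(&encoded);
    assert_eq!(decoded.unwrap_err(), rlp::DecoderError::RlpIsTooBig);
}
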
2 changes: 0 additions & 2 deletions parity/light_helpers/mod.rs
@@ -17,7 +17,5 @@
//! Utilities and helpers for the light client.
mod epoch_fetch;
mod queue_cull;

pub use self::epoch_fetch::EpochFetch;
pub use self::queue_cull::QueueCull;
105 changes: 0 additions & 105 deletions parity/light_helpers/queue_cull.rs

This file was deleted.

11 changes: 0 additions & 11 deletions parity/run.rs
@@ -295,17 +295,6 @@ fn execute_light_impl<Cr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq
// spin up event loop
let runtime = Runtime::with_default_thread_count();

// queue cull service.
let queue_cull = Arc::new(::light_helpers::QueueCull {
client: client.clone(),
sync: light_sync.clone(),
on_demand: on_demand.clone(),
txq: txq.clone(),
executor: runtime.executor(),
});

service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?;

// start the network.
light_sync.start_network();

44 changes: 42 additions & 2 deletions rpc/src/v1/helpers/light_fetch.rs
@@ -16,8 +16,9 @@

//! Helpers for fetching blockchain data either from the light client or the network.
use std::cmp;
use std::clone::Clone;
use std::cmp;
use std::collections::BTreeMap;
use std::sync::Arc;

use types::basic_account::BasicAccount;
@@ -48,7 +49,6 @@ use ethereum_types::{Address, U256};
use hash::H256;
use parking_lot::{Mutex, RwLock};
use fastmap::H256FastMap;
use std::collections::BTreeMap;
use types::transaction::{Action, Transaction as EthTransaction, PendingTransaction, SignedTransaction, LocalizedTransaction};

use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch};
@@ -523,6 +523,46 @@
}))
}

/// Helper to cull the `light` transaction queue of mined transactions
pub fn light_cull(&self, txq: Arc<RwLock<TransactionQueue>>) -> impl Future<Item = (), Error = Error> + Send {
let senders = txq.read().queued_senders();
if senders.is_empty() {
return Either::B(future::err(errors::internal("No pending local transactions", "")));
}

let sync = self.sync.clone();
let on_demand = self.on_demand.clone();
let best_header = self.client.best_block_header();
let start_nonce = self.client.engine().account_start_nonce(best_header.number());

let account_request = sync.with_context(move |ctx| {
// fetch the nonce of each sender in the queue.
let nonce_reqs = senders.iter()
.map(|&address| request::Account { header: best_header.clone().into(), address })
.collect::<Vec<_>>();

// when they come in, update each sender to the new nonce.
on_demand.request(ctx, nonce_reqs)
.expect(NO_INVALID_BACK_REFS_PROOF)
.map(move |accs| {
let mut txq = txq.write();
accs.into_iter()
.map(|maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce))
.zip(senders)
.for_each(|(nonce, addr)| {
txq.cull(addr, nonce);
});
})
.map_err(errors::on_demand_error)
});

if let Some(fut) = account_request {
Either::A(fut)
} else {
Either::B(future::err(errors::network_disabled()))
}
}

fn send_requests<T, F>(&self, reqs: Vec<OnDemandRequest>, parse_response: F) -> impl Future<Item = T, Error = Error> + Send where
F: FnOnce(Vec<OnDemandResponse>) -> T + Send + 'static,
T: Send + 'static,
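
Aside (not part of the diff): a simplified, hypothetical sketch of the culling idea used by `light_cull` above — once the network reports a sender's current account nonce, every queued transaction from that sender with a lower nonce has necessarily been mined (or replaced) and can be dropped. `SimpleQueue` below is an illustrative stand-in, not the repo's `TransactionQueue`.

use std::collections::{BTreeMap, HashMap};

// Illustrative per-sender queue: sender -> (nonce -> transaction id).
struct SimpleQueue {
    by_sender: HashMap<String, BTreeMap<u64, String>>,
}

impl SimpleQueue {
    // Drop every queued transaction from `sender` whose nonce is below `current_nonce`.
    fn cull(&mut self, sender: &str, current_nonce: u64) {
        if let Some(txs) = self.by_sender.get_mut(sender) {
            // `split_off` returns the entries with nonce >= current_nonce; keep only those.
            let still_pending = txs.split_off(&current_nonce);
            *txs = still_pending;
        }
        // Forget the sender entirely once nothing is left pending.
        if self.by_sender.get(sender).map_or(false, |txs| txs.is_empty()) {
            self.by_sender.remove(sender);
        }
    }
}

With such a queue, `cull("0xabc...", 5)` drops queued nonces 0 through 4 and keeps 5 and above, which mirrors what `txq.cull(addr, nonce)` does for each sender once the on-demand nonce requests resolve.
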
15 changes: 11 additions & 4 deletions rpc/src/v1/impls/light/eth.rs
@@ -420,15 +420,22 @@ where
}

fn transaction_by_hash(&self, hash: H256) -> BoxFuture<Option<Transaction>> {
{
let tx_queue = self.transaction_queue.read();
if let Some(tx) = tx_queue.get(&hash) {
let in_txqueue = self.transaction_queue.read().get(&hash).is_some();

// If the transaction is in the local `txqueue`, fetch the latest state from the network and attempt
// to cull the transaction queue.
if in_txqueue {
// Note: this blocks (relying on the HTTP timeout) until `cull` has finished, so that the `txqueue` is
// up to date and `eth_getTransactionByHash` does not need to be called more than once.
if let Err(e) = self.fetcher().light_cull(self.transaction_queue.clone()).wait() {
debug!(target: "cull", "failed because of: {:?}", e);
}
if let Some(tx) = self.transaction_queue.read().get(&hash) {
return Box::new(future::ok(Some(Transaction::from_pending(
tx.clone(),
))));
}
}

Box::new(self.fetcher().transaction_by_hash(hash).map(|x| x.map(|(tx, _)| tx)))
}

