
[stable] Backports (#8557)
* Update wasmi and pwasm-utils (#8493)

* Update wasmi to 0.2

The new wasmi supports 32-bit platforms and no longer requires a special feature to build for such platforms.

* Update pwasm-utils to 0.1.5

* Fetching logs by hash in blockchain database (#8463)

* Fetch logs by hash in blockchain database

* Fix tests

* Add unit test for branch block logs fetching

* Add docs that blocks must already be sorted

* Handle branch block cases properly

* typo: empty -> is_empty

* Remove return_empty_if_none by using a closure

* Use BTreeSet to avoid sorting again

* Move is_canon to BlockChain

* typo: pass value by reference

* Use loop and wrap inside blocks to simplify the code

Borrowed from #8463 (comment)

* typo: missed a comment

* Pass on storage key tracing to handle the case when the storage is not modified (#8491)

* Pass on storage keys even if they are not modified

* typo: account and storage query

`to_pod_diff` builds both the `touched_addresses` merge and the storage keys merge.

* Fix tests

* Use state query directly because of suicided accounts

* Fix a RefCell borrow issue

* Add tests for unmodified storage trace

* Address grumbles

* typo: remove unwanted empty line

* ensure_cached compiles with the original signature

* Enable WebAssembly and Byzantium for Ellaism (#8520)

* Enable WebAssembly and Byzantium for Ellaism

* Fix indentation

* Remove empty lines

* Fix compilation.
tomusdrw authored and 5chdn committed May 7, 2018
1 parent d9f6aba commit b9ceda3
Showing 7 changed files with 244 additions and 62 deletions.
19 changes: 13 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default.

17 changes: 13 additions & 4 deletions ethcore/res/ethereum/ellaism.json
@@ -13,9 +13,9 @@
"eip150Transition": "0x0",
"eip160Transition": "0x0",
"ecip1017EraRounds": 10000000,

"eip161abcTransition": "0x7fffffffffffffff",
"eip161dTransition": "0x7fffffffffffffff"
"eip161dTransition": "0x7fffffffffffffff",
"eip100bTransition": 2000000
}
}
},
@@ -29,7 +29,12 @@
"chainID": "0x40",
"eip155Transition": "0x0",
"eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
"eip86Transition": "0x7fffffffffffff",
"wasmActivationTransition": 2000000,
"eip140Transition": 2000000,
"eip211Transition": 2000000,
"eip214Transition": 2000000,
"eip658Transition": 2000000
},
"genesis": {
"seal": {
@@ -67,6 +72,10 @@
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 2000000, "pricing": { "modexp": { "divisor": 20 } } } },
"0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 2000000, "pricing": { "linear": { "base": 500, "word": 0 } } } },
"0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 2000000, "pricing": { "linear": { "base": 40000, "word": 0 } } } },
"0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 2000000, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } }
}
}
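
The `linear` pricing entries above (identity, alt_bn128_add, alt_bn128_mul) follow the usual base-plus-per-word model: the gas charged is commonly computed as base + word × ⌈input length / 32⌉, while modexp and alt_bn128_pairing use their own formulas. Below is a small standalone sketch of that linear rule, plugging in numbers from the spec above — an illustration of the pricing model, not the client's actual builtin code.

```rust
// Illustrative only: gas for a `linear`-priced builtin as base + word * ceil(len / 32).
fn linear_builtin_cost(base: u64, word: u64, input_len: usize) -> u64 {
    let words = ((input_len + 31) / 32) as u64; // 32-byte words, rounded up
    base + word * words
}

fn main() {
    // `identity` above: base 15, word 3 -> 15 + 3 * ceil(100 / 32) = 27 gas for 100 bytes.
    assert_eq!(linear_builtin_cost(15, 3, 100), 27);
    // `alt_bn128_add` above: base 500, word 0 -> a flat 500 gas, whatever the input size.
    assert_eq!(linear_builtin_cost(500, 0, 128), 500);
    println!("linear pricing sketch OK");
}
```
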
60 changes: 53 additions & 7 deletions ethcore/src/blockchain/blockchain.rs
@@ -57,6 +57,12 @@ pub trait BlockProvider {
/// (though not necessarily a part of the canon chain).
fn is_known(&self, hash: &H256) -> bool;

/// Returns true if the given block is known and in the canon chain.
fn is_canon(&self, hash: &H256) -> bool {
let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?);
is_canon().unwrap_or(false)
}

/// Get the first block of the best part of the chain.
/// Return `None` if there is no gap and the first block is the genesis.
/// Any queries of blocks which precede this one are not guaranteed to
@@ -153,7 +159,7 @@ pub trait BlockProvider {
fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;

/// Returns logs matching given filter.
fn logs<F>(&self, blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
fn logs<F>(&self, blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized;
}

@@ -360,16 +366,18 @@ impl BlockProvider for BlockChain {
.collect()
}

fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
/// Returns logs matching given filter. The order of logs returned will be the same as the order of the blocks
/// provided, and it is the caller's responsibility to sort the blocks in advance.
fn logs<F>(&self, mut blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized {
// sort in reverse order
blocks.sort_by(|a, b| b.cmp(a));
blocks.reverse();

let mut logs = blocks
.chunks(128)
.flat_map(move |blocks_chunk| {
blocks_chunk.into_par_iter()
.filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash)))
.filter_map(|hash| self.block_number(&hash).map(|r| (r, hash)))
.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes())))
.flat_map(|(number, hash, mut receipts, mut hashes)| {
@@ -396,7 +404,7 @@ impl BlockProvider for BlockChain {
.enumerate()
.map(move |(i, log)| LocalizedLogEntry {
entry: log,
block_hash: hash,
block_hash: *hash,
block_number: number,
transaction_hash: tx_hash,
// iterating in reverse order
@@ -1957,17 +1965,33 @@ mod tests {
value: 103.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&secret(), None);
let t4 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 104.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&secret(), None);
let tx_hash1 = t1.hash();
let tx_hash2 = t2.hash();
let tx_hash3 = t3.hash();
let tx_hash4 = t4.hash();

let genesis = BlockBuilder::genesis();
let b1 = genesis.add_block_with_transactions(vec![t1, t2]);
let b2 = b1.add_block_with_transactions(iter::once(t3));
let b3 = genesis.add_block_with(|| BlockOptions {
transactions: vec![t4.clone()],
difficulty: U256::from(9),
..Default::default()
}); // Branch block
let b1_hash = b1.last().hash();
let b1_number = b1.last().number();
let b2_hash = b2.last().hash();
let b2_number = b2.last().number();
let b3_hash = b3.last().hash();
let b3_number = b3.last().number();

let db = new_db();
let bc = new_chain(&genesis.last().encoded(), db.clone());
@@ -1998,10 +2022,21 @@
],
}
]);
insert_block(&db, &bc, &b3.last().encoded(), vec![
Receipt {
outcome: TransactionOutcome::StateRoot(H256::default()),
gas_used: 10_000.into(),
log_bloom: Default::default(),
logs: vec![
LogEntry { address: Default::default(), topics: vec![], data: vec![5], },
],
}
]);

// when
let logs1 = bc.logs(vec![1, 2], |_| true, None);
let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));
let logs1 = bc.logs(vec![b1_hash, b2_hash], |_| true, None);
let logs2 = bc.logs(vec![b1_hash, b2_hash], |_| true, Some(1));
let logs3 = bc.logs(vec![b3_hash], |_| true, None);

// then
assert_eq!(logs1, vec![
@@ -2053,6 +2088,17 @@
log_index: 0,
}
]);
assert_eq!(logs3, vec![
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![5] },
block_hash: b3_hash,
block_number: b3_number,
transaction_hash: tx_hash4,
transaction_index: 0,
transaction_log_index: 0,
log_index: 0,
}
]);
}

#[test]
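
The `is_canon` default method added above relies on a simple round trip: a hash is on the canon chain exactly when mapping it to its block number and mapping that number back to a hash returns the original hash. Here is a standalone sketch of that check against a toy in-memory index — the types and the `u64` stand-ins for hashes are illustrative, not Parity's.

```rust
// Toy model of the hash -> number -> hash round trip behind `is_canon`.
use std::collections::HashMap;

struct ToyChain {
    number_by_hash: HashMap<u64, u64>, // every known block: hash -> number
    hash_by_number: HashMap<u64, u64>, // canonical chain only: number -> hash
}

impl ToyChain {
    fn is_canon(&self, hash: &u64) -> bool {
        // Mirrors the closure-plus-`?` style used in the diff above.
        let check = || Some(hash == self.hash_by_number.get(self.number_by_hash.get(hash)?)?);
        check().unwrap_or(false)
    }
}

fn main() {
    let mut chain = ToyChain { number_by_hash: HashMap::new(), hash_by_number: HashMap::new() };
    // Block #1 has two known versions: 0xaa on the canon chain and 0xbb on a branch.
    chain.number_by_hash.insert(0xaa, 1);
    chain.number_by_hash.insert(0xbb, 1);
    chain.hash_by_number.insert(1, 0xaa);

    assert!(chain.is_canon(&0xaa));  // canonical block
    assert!(!chain.is_canon(&0xbb)); // known, but only on a branch
    assert!(!chain.is_canon(&0xcc)); // unknown hash
    println!("is_canon sketch OK");
}
```

A branch block is known (it has a number), but its number maps to a different canonical hash — exactly the case the new branch-block test above exercises.
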
92 changes: 76 additions & 16 deletions ethcore/src/client/client.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
use std::collections::{HashSet, HashMap, BTreeMap, BTreeSet, VecDeque};
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
@@ -1666,23 +1666,83 @@ impl BlockChainClient for Client {
}

fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
let (from, to) = match (self.block_number_ref(&filter.from_block), self.block_number_ref(&filter.to_block)) {
(Some(from), Some(to)) => (from, to),
_ => return Vec::new(),
};
// Wrap the logic inside a closure so that we can take advantage of question mark syntax.
let fetch_logs = || {
let chain = self.chain.read();

let chain = self.chain.read();
let blocks = filter.bloom_possibilities().iter()
.map(move |bloom| {
chain.blocks_with_bloom(bloom, from, to)
})
.flat_map(|m| m)
// remove duplicate elements
.collect::<HashSet<u64>>()
.into_iter()
.collect::<Vec<u64>>();
// First, check whether `filter.from_block` and `filter.to_block` are on the canon chain. If so, we can use the
// optimized version.
let is_canon = |id| {
match id {
&BlockId::Pending => true,
// If it is referred to by number, then it is always on the canon chain.
&BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true,
// If it is referred to by hash, we check whether a hash -> number -> hash conversion gives us the same
// result.
&BlockId::Hash(ref hash) => chain.is_canon(hash),
}
};

let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) {
// If we are on the canon chain, use bloom filter to fetch required hashes.
let from = self.block_number_ref(&filter.from_block)?;
let to = self.block_number_ref(&filter.to_block)?;

filter.bloom_possibilities().iter()
.map(|bloom| {
chain.blocks_with_bloom(bloom, from, to)
})
.flat_map(|m| m)
// remove duplicate elements
.collect::<BTreeSet<u64>>()
.into_iter()
.filter_map(|n| chain.block_hash(n))
.collect::<Vec<H256>>()

} else {
// Otherwise, we use a slower version that finds a link between from_block and to_block.
let from_hash = Self::block_hash(&chain, &*self.miner, filter.from_block)?;
let from_number = chain.block_number(&from_hash)?;
let to_hash = Self::block_hash(&chain, &*self.miner, filter.to_block)?;

let blooms = filter.bloom_possibilities();
let bloom_match = |header: &encoded::Header| {
blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom))
};

let (blocks, last_hash) = {
let mut blocks = Vec::new();
let mut current_hash = to_hash;

loop {
let header = chain.block_header_data(&current_hash)?;
if bloom_match(&header) {
blocks.push(current_hash);
}

// Stop if `from` block is reached.
if header.number() <= from_number {
break;
}
current_hash = header.parent_hash();
}

blocks.reverse();
(blocks, current_hash)
};

// Check if we've actually reached the expected `from` block.
if last_hash != from_hash || blocks.is_empty() {
return None;
}

blocks
};

Some(self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit))
};

self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)
fetch_logs().unwrap_or_default()
}

fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
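
The slower branch of `logs` above boils down to a parent-hash walk: start from the `to` block, follow parent hashes until the `from` block's height is reached, keep the blocks whose header bloom could match the filter, and finally confirm the walk actually ended on the expected `from` hash (otherwise `to` does not descend from `from`). A standalone sketch of that walk over a toy header map follows — the function name, types, and `u64` hashes are illustrative, not Parity's.

```rust
// Toy model of the branch-aware block collection used by the non-canon path above.
use std::collections::HashMap;

struct ToyHeader {
    number: u64,
    parent: u64,       // parent hash (hashes as u64 for brevity)
    bloom_match: bool, // stands in for the bloom-filter check on the real header
}

fn branch_blocks(
    headers: &HashMap<u64, ToyHeader>, // hash -> header
    from_hash: u64,
    from_number: u64,
    to_hash: u64,
) -> Option<Vec<u64>> {
    let mut blocks = Vec::new();
    let mut current_hash = to_hash;

    loop {
        let header = headers.get(&current_hash)?;
        if header.bloom_match {
            blocks.push(current_hash);
        }
        // Stop once the `from` block's height is reached.
        if header.number <= from_number {
            break;
        }
        current_hash = header.parent;
    }

    // Reject the result if the walk did not land on the expected `from` block.
    if current_hash != from_hash || blocks.is_empty() {
        return None;
    }
    blocks.reverse(); // oldest first, as the `logs` call expects
    Some(blocks)
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert(0xa1, ToyHeader { number: 1, parent: 0x00, bloom_match: true });
    headers.insert(0xa2, ToyHeader { number: 2, parent: 0xa1, bloom_match: false });
    headers.insert(0xa3, ToyHeader { number: 3, parent: 0xa2, bloom_match: true });

    // from = block 1 (0xa1), to = block 3 (0xa3): only the bloom-matching blocks remain.
    assert_eq!(branch_blocks(&headers, 0xa1, 1, 0xa3), Some(vec![0xa1, 0xa3]));
    // A `from` hash the walk never reaches yields None.
    assert_eq!(branch_blocks(&headers, 0xff, 1, 0xa3), None);
    println!("branch walk sketch OK");
}
```

The canon-chain fast path above also leans on `BTreeSet` ordering: collecting the matching block numbers into a `BTreeSet` deduplicates and sorts them in one step, which is why the explicit sort from the old code is gone.
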