This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Fixing clippy warnings #1660

Merged
merged 1 commit on Jul 19, 2016

ethcore/src/blockchain/blockchain.rs (3 changes: 2 additions & 1 deletion)
@@ -390,7 +390,8 @@ impl BlockChain {
 				return Some(hash);
 			}
 		}
-		return None;
+
+		None
 	}
 
 	/// Set the cache configuration.
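The blockchain.rs hunk is clippy's `needless_return` suggestion: the last expression of a function body is already its value, so the explicit `return` keyword on that final line is redundant. A minimal, hypothetical sketch of the same rewrite (not code from this repository):

// Hypothetical example of the needless_return rewrite.
fn first_even(values: &[u32]) -> Option<u32> {
    for &v in values {
        if v % 2 == 0 {
            // Early exits still use `return`.
            return Some(v);
        }
    }
    // Before: `return None;`; clippy prefers the bare tail expression.
    None
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), Some(4));
    assert_eq!(first_even(&[1, 3, 5]), None);
}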
ethcore/src/ethereum/ethash.rs (20 changes: 10 additions & 10 deletions)
@@ -43,7 +43,7 @@ pub struct EthashParams {
 	pub dao_hardfork_transition: u64,
 	/// DAO hard-fork refund contract address (C).
 	pub dao_hardfork_beneficiary: Address,
-	/// DAO hard-fork DAO accounts list (L)
+	/// DAO hard-fork DAO accounts list (L)
 	pub dao_hardfork_accounts: Vec<Address>,
 }

@@ -55,11 +55,11 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
 			difficulty_bound_divisor: p.difficulty_bound_divisor.into(),
 			duration_limit: p.duration_limit.into(),
 			block_reward: p.block_reward.into(),
-			registrar: p.registrar.map(Into::into).unwrap_or(Address::new()),
-			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map(Into::into).unwrap_or(0),
-			dao_hardfork_transition: p.dao_hardfork_transition.map(Into::into).unwrap_or(0x7fffffffffffffff),
-			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map(Into::into).unwrap_or(Address::new()),
-			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or(vec![]).into_iter().map(Into::into).collect(),
+			registrar: p.registrar.map_or_else(Address::new, Into::into),
+			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map_or(0, Into::into),
+			dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
+			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
+			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
 		}
 	}
 }
@@ -131,7 +131,7 @@ impl Engine for Ethash {
 		if header.number >= self.ethash_params.dao_hardfork_transition &&
 			header.number <= self.ethash_params.dao_hardfork_transition + 9 {
 			header.extra_data = b"dao-hard-fork"[..].to_owned();
-		}
+		}
 		header.note_dirty();
 //		info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
 	}
@@ -141,7 +141,7 @@ impl Engine for Ethash {
 		// TODO: enable trigger function maybe?
 //		if block.fields().header.gas_limit <= 4_000_000.into() {
 		let mut state = block.fields_mut().state;
-		for child in self.ethash_params.dao_hardfork_accounts.iter() {
+		for child in &self.ethash_params.dao_hardfork_accounts {
 			let b = state.balance(child);
 			state.transfer_balance(child, &self.ethash_params.dao_hardfork_beneficiary, &b);
 		}
@@ -199,8 +199,8 @@ impl Engine for Ethash {
 
 		if header.gas_limit > 0x7fffffffffffffffu64.into() {
 			return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit })));
-		}
+		}
 
 		Ok(())
 	}
 
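The ethash.rs changes bundle a few recurring clippy suggestions: `map(Into::into).unwrap_or(default)` collapses into a single `map_or`/`map_or_else`, eagerly built defaults such as `unwrap_or(vec![])` become lazy `unwrap_or_else(Vec::new)` (the `or_fun_call` / `option_map_unwrap_or` family of lints), and the DAO accounts loop takes `&collection` instead of `.iter()` (the `explicit_iter_loop` lint). A small self-contained sketch of the Option combinators, using a made-up `Addr` type in place of the real `Address`:

// Stand-in for the Address type used in the real diff.
#[derive(Debug, PartialEq)]
struct Addr(u64);

impl Addr {
    fn new() -> Self {
        Addr(0)
    }
}

fn main() {
    let registrar: Option<u64> = None;
    let limit: Option<u64> = Some(42);
    let accounts: Option<Vec<u64>> = Some(vec![1, 2]);

    // Before: registrar.map(Into::into).unwrap_or(Addr::new())
    // unwrap_or builds its default eagerly; map_or_else only calls
    // Addr::new when the Option is None.
    let reg: Addr = registrar.map_or_else(Addr::new, Addr);
    assert_eq!(reg, Addr(0));

    // Before: limit.map(Into::into).unwrap_or(0)
    // map_or folds the map plus unwrap into one call.
    let lim: u64 = limit.map_or(0, Into::into);
    assert_eq!(lim, 42);

    // Before: accounts.unwrap_or(vec![]) allocates the empty Vec even
    // when the value is Some; unwrap_or_else defers the allocation.
    let accounts: Vec<u64> = accounts.unwrap_or_else(Vec::new);
    assert_eq!(accounts, vec![1, 2]);
}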
ethcore/src/migrations/state/v7.rs (2 changes: 1 addition & 1 deletion)
@@ -61,7 +61,7 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
 	}
 }
 
-/// Version for ArchiveDB.
+/// Version for `ArchiveDB`.
 #[derive(Default)]
 pub struct ArchiveV7(usize);
 
ethcore/src/snapshot/mod.rs (6 changes: 3 additions & 3 deletions)
@@ -72,7 +72,7 @@ pub fn take_snapshot(client: &BlockChainClient, mut path: PathBuf, state_db: &Ha
 
 	let mut manifest_file = try!(File::create(&path));
 
-	try!(manifest_file.write_all(&manifest_data.to_rlp()));
+	try!(manifest_file.write_all(&manifest_data.into_rlp()));
 
 	Ok(())
 }
@@ -287,7 +287,7 @@ pub struct ManifestData {
 
 impl ManifestData {
 	/// Encode the manifest data to rlp.
-	pub fn to_rlp(self) -> Bytes {
+	pub fn into_rlp(self) -> Bytes {
 		let mut stream = RlpStream::new_list(5);
 		stream.append(&self.state_hashes);
 		stream.append(&self.block_hashes);
@@ -414,4 +414,4 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu
 		*out = (hash, thin_rlp);
 	}
 	Ok(())
-}
+}
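The `to_rlp` to `into_rlp` rename here (and the matching `as_action` to `into_action` rename in triedbmut.rs further down) follows clippy's `wrong_self_convention` lint: a method that consumes `self` by value should be named `into_*`, while `as_*` borrows and `to_*` implies an inexpensive copy. A hedged, standalone sketch of that convention on a made-up type:

// Hypothetical node type; only the naming convention mirrors the diff.
struct Node {
    payload: Vec<u8>,
}

impl Node {
    // Borrowing accessor: `as_*` takes &self.
    fn as_bytes(&self) -> &[u8] {
        &self.payload
    }

    // Consuming conversion: `into_*` takes self by value, which is what
    // clippy flags about names like the old to_rlp / as_action.
    fn into_bytes(self) -> Vec<u8> {
        self.payload
    }
}

fn main() {
    let node = Node { payload: vec![0xc0] };
    assert_eq!(node.as_bytes()[0], 0xc0);
    let owned = node.into_bytes(); // node is moved here
    assert_eq!(owned, vec![0xc0]);
}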
parity/main.rs (2 changes: 1 addition & 1 deletion)
@@ -577,7 +577,7 @@ fn wait_for_exit(
 
 	// Wait for signal
 	let mutex = Mutex::new(());
-	let _ = exit.wait(&mut mutex.lock());
+	exit.wait(&mut mutex.lock());
 	info!("Finishing work, please wait...");
 }
 
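The parity/main.rs change drops a `let _ =` binding: the condvar wait here returns `()`, so binding the result adds nothing and clippy (a `let_unit_value`-style warning; the exact lint name is my assumption) suggests letting the call stand on its own. A trivial illustration:

// A function returning unit, like the wait call in the diff.
fn wait_for_signal() {
    println!("waiting...");
}

fn main() {
    // Before: let _ = wait_for_signal();
    // After: the call simply stands alone.
    wait_for_signal();
}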
parity/modules.rs (8 changes: 5 additions & 3 deletions)
@@ -19,12 +19,14 @@ use std::sync::Arc;
 use ethcore::client::{ChainNotify, BlockChainClient};
 use ethcore;
 
+pub type Modules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
+
 #[cfg(feature="ipc")]
 pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
 }
 
@@ -33,8 +35,8 @@ pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
-	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(|e| ethcore::error::Error::Util(e)));
+	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(ethcore::error::Error::Util));
 	Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
 }
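The modules.rs hunks show two more patterns: a `pub type Modules` alias so the three-`Arc` tuple (which clippy's `type_complexity` lint dislikes) is written once, and `map_err(ethcore::error::Error::Util)` passing the enum variant constructor directly instead of the closure `|e| Error::Util(e)` (the `redundant_closure` lint). A self-contained sketch with invented trait and error names:

use std::sync::Arc;

// Invented traits standing in for SyncProvider, ManageNetwork, ChainNotify.
trait SyncApi {}
trait NetApi {}
trait NotifyApi {}

// The alias keeps the long tuple type out of every signature.
type Modules = (Arc<dyn SyncApi>, Arc<dyn NetApi>, Arc<dyn NotifyApi>);

#[derive(Debug)]
enum Error {
    Util(String),
}

struct Service;
impl SyncApi for Service {}
impl NetApi for Service {}
impl NotifyApi for Service {}

fn start(fail: bool) -> Result<Arc<Service>, String> {
    if fail { Err("boom".into()) } else { Ok(Arc::new(Service)) }
}

fn sync(fail: bool) -> Result<Modules, Error> {
    // Before: .map_err(|e| Error::Util(e)) wraps the constructor in a
    // closure that only forwards its argument; the constructor itself
    // is already a function of the right shape.
    let svc = start(fail).map_err(Error::Util)?;
    Ok((
        svc.clone() as Arc<dyn SyncApi>,
        svc.clone() as Arc<dyn NetApi>,
        svc as Arc<dyn NotifyApi>,
    ))
}

fn main() {
    assert!(sync(false).is_ok());
    assert!(matches!(sync(true), Err(Error::Util(_))));
}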
util/src/common.rs (1 change: 1 addition & 0 deletions)
@@ -81,6 +81,7 @@ macro_rules! map_into {
 
 #[macro_export]
 macro_rules! flush {
+	($arg:expr) => ($crate::flush($arg.into()));
 	($($arg:tt)*) => ($crate::flush(format!("{}", format_args!($($arg)*))));
 }
 
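The extra `flush!` arm lets a call with a single expression hand the value straight to `flush` via `.into()`, instead of always routing through `format!` (presumably to quiet clippy warnings about pointless `format!` calls at such call sites; the exact lint involved is my guess). A reduced sketch of the two-arm shape, with a local stand-in for the crate's `flush` function:

// Local stand-in for the crate-level flush function from util.
fn flush(s: String) {
    print!("{}", s);
}

macro_rules! out {
    // New-style arm: a lone expression is converted directly.
    ($arg:expr) => {
        flush($arg.into())
    };
    // Fallback arm: anything with format arguments goes through format!.
    ($($arg:tt)*) => {
        flush(format!($($arg)*))
    };
}

fn main() {
    out!("plain line\n");
    out!("{} + {} = {}\n", 2, 2, 2 + 2);
}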
util/src/network/service.rs (2 changes: 1 addition & 1 deletion)
@@ -33,7 +33,7 @@ impl IoHandler<NetworkIoMessage> for HostHandler {
 	fn message(&self, _io: &IoContext<NetworkIoMessage>, message: &NetworkIoMessage) {
 		if let NetworkIoMessage::NetworkStarted(ref public_url) = *message {
 			let mut url = self.public_url.write();
-			if url.as_ref().map(|uref| uref != public_url).unwrap_or(true) {
+			if url.as_ref().map_or(true, |uref| uref != public_url) {
 				info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(public_url.as_ref()));
 			}
 			*url = Some(public_url.to_owned());
util/src/trie/triedbmut.rs (13 changes: 7 additions & 6 deletions)
@@ -122,7 +122,7 @@ impl Node {
 
 	// encode a node to RLP
 	// TODO: parallelize
-	fn to_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
+	fn into_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
 		where F: FnMut(NodeHandle, &mut RlpStream)
 	{
 		match self {
@@ -183,7 +183,7 @@ enum InsertAction {
 }
 
 impl InsertAction {
-	fn as_action(self) -> Action {
+	fn into_action(self) -> Action {
 		match self {
 			InsertAction::Replace(n) => Action::Replace(n),
 			InsertAction::Restore(n) => Action::Restore(n),
@@ -442,13 +442,14 @@ impl<'a> TrieDBMut<'a> {
 		};
 		let stored = self.storage.destroy(h);
 		let (new_stored, changed) = self.inspect(stored, move |trie, stored| {
-			trie.insert_inspector(stored, partial, value).as_action()
+			trie.insert_inspector(stored, partial, value).into_action()
 		}).expect("Insertion never deletes.");
 
 		(self.storage.alloc(new_stored), changed)
 	}
 
 	/// the insertion inspector.
+	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
 	fn insert_inspector(&mut self, node: Node, partial: NibbleSlice, value: Bytes) -> InsertAction {
 		trace!(target: "trie", "augmented (partial: {:?}, value: {:?})", partial, value.pretty());
 
@@ -819,7 +820,7 @@ impl<'a> TrieDBMut<'a> {
 
 		match self.storage.destroy(handle) {
 			Stored::New(node) => {
-				let root_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+				let root_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 				*self.root = self.db.insert(&root_rlp[..]);
 				self.hash_count += 1;
 
@@ -842,7 +843,7 @@ impl<'a> TrieDBMut<'a> {
 			NodeHandle::InMemory(h) => match self.storage.destroy(h) {
 				Stored::Cached(_, h) => stream.append(&h),
 				Stored::New(node) => {
-					let node_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+					let node_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 					if node_rlp.len() >= 32 {
 						let hash = self.db.insert(&node_rlp[..]);
 						self.hash_count += 1;
Expand Down Expand Up @@ -1257,4 +1258,4 @@ mod tests {
assert!(t.is_empty());
assert_eq!(*t.root(), SHA3_NULL_RLP);
}
}
}
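One more detail in triedbmut.rs: the new `#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]` attribute only switches the clippy allow on when the crate is built with its `dev` feature (presumably the feature under which clippy runs for this codebase), so ordinary builds never carry the lint attribute at all. A hedged, standalone sketch of the same gating on a made-up function:

// Hypothetical function; only the attribute gating mirrors the diff. The
// allow is emitted solely when the crate is compiled with the "dev" feature.
#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
fn classify(n: u32) -> &'static str {
    match n {
        0 => "zero",
        n if n % 15 == 0 => "fizzbuzz",
        n if n % 3 == 0 => "fizz",
        n if n % 5 == 0 => "buzz",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(0), "zero");
    assert_eq!(classify(9), "fizz");
    assert_eq!(classify(30), "fizzbuzz");
}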