Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

deserialize using get_data_size(), which refers to blob.data()'s length, instead of using msg.meta.size, which refers to the entire blob's length #755

Merged
merged 3 commits into from
Jul 25, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/erasure.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE, BLOB_SIZE};
use packet::{BlobRecycler, SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE};
use std::cmp;
use std::mem;
use std::result;
Expand Down Expand Up @@ -575,7 +575,7 @@ pub fn recover(
data_size,
locks[n].data()[0]
);
if data_size > BLOB_SIZE as u64 {
if data_size > BLOB_DATA_SIZE as u64 {
corrupt = true;
}
}
Expand Down
33 changes: 27 additions & 6 deletions src/ledger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
use bincode::{self, deserialize, serialize_into};
use entry::Entry;
use hash::Hash;
use packet::{self, SharedBlob, BLOB_SIZE};
use packet::{self, SharedBlob, BLOB_DATA_SIZE};
use rayon::prelude::*;
use std::collections::VecDeque;
use std::io::Cursor;
Expand Down Expand Up @@ -44,7 +44,7 @@ impl Block for [Entry] {
serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize
};
assert!(pos < BLOB_SIZE);
assert!(pos < BLOB_DATA_SIZE);
blob.write().unwrap().set_size(pos);
q.push_back(blob);
}
Expand All @@ -57,7 +57,8 @@ pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::R
for blob in blobs {
let entry = {
let msg = blob.read().unwrap();
deserialize(&msg.data()[..msg.meta.size])
let msg_size = msg.get_size();
deserialize(&msg.data()[..msg_size])
};

match entry {
Expand Down Expand Up @@ -149,6 +150,7 @@ pub fn next_entries(
mod tests {
use super::*;
use bincode::serialized_size;
use chrono::prelude::*;
use entry::{next_entry, Entry};
use hash::hash;
use packet::{BlobRecycler, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
Expand All @@ -175,10 +177,29 @@ mod tests {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10_000];
let tx0 = Transaction::new_vote(
&keypair,
Vote {
version: 0,
contact_info_version: 1,
},
one,
1,
);
let tx1 = Transaction::new_timestamp(&keypair, Utc::now(), one);
//
// TODO: this magic number and the mix of transaction types
// is designed to fill up a Blob more or less exactly,
// to get near enough to the threshold that
// deserialization falls over if it uses the wrong size()
// parameter to index into blob.data()
//
// magic numbers -----------------+
// |
// V
let mut transactions = vec![tx0; 362];
transactions.extend(vec![tx1; 100]);
let entries = next_entries(&zero, 0, transactions);

let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
Expand Down
27 changes: 27 additions & 0 deletions src/packet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,12 @@ impl Default for Blob {
}
}

//#[derive(Debug)]
//pub enum BlobError {
// /// the Blob's meta and data are not self-consistent
// BadState,
//}

/// Generic object pool for `T`s, shared across threads.
pub struct Recycler<T> {
    // Shared, mutex-guarded list of pooled objects. Each entry is an
    // Arc<RwLock<T>> handle; presumably handles are lent out and pushed
    // back here for reuse instead of reallocating — TODO(review): confirm
    // against the allocate/recycle methods (not visible in this chunk).
    gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
Expand Down Expand Up @@ -373,6 +379,27 @@ impl Blob {
/// Mutable view of the blob's payload: everything in the backing buffer
/// after the `BLOB_HEADER_SIZE`-byte header.
pub fn data_mut(&mut self) -> &mut [u8] {
    &mut self.data[BLOB_HEADER_SIZE..]
}
/// Returns the length of the serialized payload inside `data()`: the
/// size recorded in the blob header minus the header itself.
///
/// Returns 0 as a bad-state sentinel when the blob is not
/// self-consistent: the header's recorded size cannot be read, does not
/// match `meta.size`, or is smaller than `BLOB_HEADER_SIZE`.
//
// TODO: return Result<usize, BlobError> instead of a zero sentinel so
// callers can distinguish "empty" from "corrupt" (needs a BlobError enum
// that implements std::error::Error).
pub fn get_size(&self) -> usize {
    match self.get_data_size() {
        // checked_sub avoids usize underflow (debug panic / release
        // wraparound) when the recorded size is smaller than the header;
        // that inconsistent state also maps to the 0 sentinel.
        Ok(sz) if self.meta.size == sz as usize => {
            (sz as usize).checked_sub(BLOB_HEADER_SIZE).unwrap_or(0)
        }
        // Unreadable header or meta/header mismatch: bad state, not a
        // reason to panic in an accessor whose job is to detect it.
        _ => 0,
    }
}
pub fn set_size(&mut self, size: usize) {
let new_size = size + BLOB_HEADER_SIZE;
self.meta.size = new_size;
Expand Down