Fix bug in page skipping #2552

Merged · 4 commits · Sep 5, 2022
19 changes: 19 additions & 0 deletions parquet/src/arrow/arrow_reader/selection.rs
@@ -595,5 +595,24 @@ mod tests {

// assert_eq!(mask, vec![false, true, true, false, true, true, true]);
assert_eq!(ranges, vec![10..20, 20..30, 40..50, 50..60, 60..70]);

let selection = RowSelection::from(vec![
// Skip first page
RowSelector::skip(10),
// Multiple selects in same page
RowSelector::select(3),
Member:
I ran this test without this PR change and it still passed 🤔

Contributor Author:
Yeah, this test is actually unrelated to the fix. I originally thought the bug was in scan_ranges, so I added this test. It turned out to be unrelated, but I figured I might as well leave the extra unit test in. Sorry for the confusion :)

RowSelector::skip(3),
RowSelector::select(4),
// Skip within the page, then select its remaining rows and the first row of the next page
RowSelector::skip(5),
RowSelector::select(6),
// Skip remaining
RowSelector::skip(50),
]);

let ranges = selection.scan_ranges(&index);

// assert_eq!(mask, vec![false, true, true, false, true, true, true]);
assert_eq!(ranges, vec![10..20, 20..30, 30..40]);
}
}
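For readers checking the new assertion by hand: assuming the synthetic page index used earlier in this test (10 rows per page, with each page's byte range equal to its row range), the selection above touches only pages 1, 2 and 3, which matches the asserted `vec![10..20, 20..30, 30..40]`. A standalone sketch of that arithmetic (plain Rust, illustrative only, not code from the parquet crate):

```rust
use std::collections::BTreeSet;

// Which pages does a skip/select sequence touch, assuming `page_rows` rows per page?
// (Illustrative only; the real logic lives in RowSelection::scan_ranges.)
fn touched_pages(selectors: &[(usize, bool)], page_rows: usize) -> Vec<usize> {
    let mut row = 0;
    let mut pages = BTreeSet::new();
    for &(row_count, skip) in selectors {
        if !skip {
            for r in row..row + row_count {
                pages.insert(r / page_rows);
            }
        }
        row += row_count;
    }
    pages.into_iter().collect()
}

fn main() {
    // (row_count, skip) pairs mirroring the RowSelectors in the test above.
    let selectors = [
        (10, true),  // skip page 0 (rows 0..10)
        (3, false),  // rows 10..13, page 1
        (3, true),
        (4, false),  // rows 16..20, still page 1
        (5, true),
        (6, false),  // rows 25..31, pages 2 and 3
        (50, true),  // skip the rest
    ];
    assert_eq!(touched_pages(&selectors, 10), vec![1, 2, 3]);
}
```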
124 changes: 124 additions & 0 deletions parquet/src/arrow/async_reader.rs
@@ -798,6 +798,7 @@ mod tests {
use arrow::array::{Array, ArrayRef, Int32Array, StringArray};
use arrow::error::Result as ArrowResult;
use futures::TryStreamExt;
use rand::{thread_rng, Rng};
use std::sync::Mutex;

struct TestReader {
@@ -936,6 +937,129 @@ mod tests {
assert_eq!(async_batches, sync_batches);
}

#[tokio::test]
async fn test_async_reader_skip_pages() {
let testdata = arrow::util::test_util::parquet_test_data();
let path = format!("{}/alltypes_tiny_pages_plain.parquet", testdata);
let data = Bytes::from(std::fs::read(path).unwrap());

let metadata = parse_metadata(&data).unwrap();
let metadata = Arc::new(metadata);

assert_eq!(metadata.num_row_groups(), 1);

let async_reader = TestReader {
data: data.clone(),
metadata: metadata.clone(),
requests: Default::default(),
};

let options = ArrowReaderOptions::new().with_page_index(true);
let builder =
ParquetRecordBatchStreamBuilder::new_with_options(async_reader, options)
.await
.unwrap();

let selection = RowSelection::from(vec![
RowSelector::skip(21), // Skip first page
RowSelector::select(21), // Select page to boundary
RowSelector::skip(41), // Skip multiple pages
RowSelector::select(41), // Select multiple pages
RowSelector::skip(25), // Skip page across boundary
RowSelector::select(25), // Select across page boundary
RowSelector::skip(7116), // Skip to final page boundary
RowSelector::select(10), // Select final page
]);

let mask = ProjectionMask::leaves(builder.parquet_schema(), vec![9]);

let stream = builder
.with_projection(mask.clone())
.with_row_selection(selection.clone())
.build()
.expect("building stream");

let async_batches: Vec<_> = stream.try_collect().await.unwrap();

let sync_batches = ParquetRecordBatchReaderBuilder::try_new(data)
.unwrap()
.with_projection(mask)
.with_batch_size(1024)
.with_row_selection(selection)
.build()
.unwrap()
.collect::<ArrowResult<Vec<_>>>()
.unwrap();

assert_eq!(async_batches, sync_batches);
}

#[tokio::test]
async fn test_fuzz_async_reader_selection() {
let testdata = arrow::util::test_util::parquet_test_data();
let path = format!("{}/alltypes_tiny_pages_plain.parquet", testdata);
let data = Bytes::from(std::fs::read(path).unwrap());

let metadata = parse_metadata(&data).unwrap();
let metadata = Arc::new(metadata);

assert_eq!(metadata.num_row_groups(), 1);

let mut rand = thread_rng();

for _ in 0..100 {
let mut expected_rows = 0;
let mut total_rows = 0;
let mut skip = false;
let mut selectors = vec![];

while total_rows < 7300 {
let row_count: usize = rand.gen_range(0..100);

let row_count = row_count.min(7300 - total_rows);

selectors.push(RowSelector { row_count, skip });

total_rows += row_count;
if !skip {
expected_rows += row_count;
}

skip = !skip;
}

let selection = RowSelection::from(selectors);

let async_reader = TestReader {
data: data.clone(),
metadata: metadata.clone(),
requests: Default::default(),
};

let options = ArrowReaderOptions::new().with_page_index(true);
let builder =
ParquetRecordBatchStreamBuilder::new_with_options(async_reader, options)
.await
.unwrap();

let col_idx: usize = rand.gen_range(0..13);
let mask = ProjectionMask::leaves(builder.parquet_schema(), vec![col_idx]);

let stream = builder
.with_projection(mask.clone())
.with_row_selection(selection.clone())
.build()
.expect("building stream");

let async_batches: Vec<_> = stream.try_collect().await.unwrap();

let actual_rows: usize =
async_batches.into_iter().map(|b| b.num_rows()).sum();

assert_eq!(actual_rows, expected_rows);
}
}

#[tokio::test]
async fn test_row_filter() {
let a = StringArray::from_iter_values(["a", "b", "b", "b", "c", "c"]);
12 changes: 9 additions & 3 deletions parquet/src/arrow/record_reader/mod.rs
@@ -145,7 +145,11 @@ where
loop {
// Try to find some records from buffers that has been read into memory
// but not counted as seen records.
let end_of_column = !self.column_reader.as_mut().unwrap().has_next()?;

// Check whether the column is exhausted. Only peek the next page, since we
// may be reading up to a page boundary and may not actually need to read
// the next page.
let end_of_column = !self.column_reader.as_mut().unwrap().peek_next()?;
Contributor Author:
I think this should ultimately get cleaned up a bit. It is confusing that we need to call peek_next here but also has_next below (since the next page still needs to get loaded). It feels like the page-level logic wants to be encapsulated inside GenericColumnReader, but it inevitably leaks out in places like this.


let (record_count, value_count) =
self.count_records(num_records - records_read, end_of_column);
@@ -154,7 +158,9 @@
self.num_values += value_count;
records_read += record_count;

if records_read == num_records || end_of_column {
if records_read == num_records
|| !self.column_reader.as_mut().unwrap().has_next()?
{
break;
}

@@ -198,7 +204,7 @@ where
pub fn skip_records(&mut self, num_records: usize) -> Result<usize> {
// First need to clear the buffer
let end_of_column = match self.column_reader.as_mut() {
Some(reader) => !reader.has_next()?,
Some(reader) => !reader.peek_next()?,
None => return Ok(0),
};

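The distinction the comment above draws between peek_next and has_next, checking whether a usable next page exists without loading it versus actually loading it, is essentially the difference between peeking at and advancing an iterator. A minimal, self-contained analogy using the standard library's Peekable (illustrative only, not code from this PR):

```rust
fn main() {
    let pages = vec!["page 0", "page 1", "page 2"];
    let mut reader = pages.into_iter().peekable();

    // "peek_next": look ahead without committing to the next page.
    // Useful when records may end exactly on a page boundary and the
    // following page should not be loaded (it might only be skipped).
    assert_eq!(reader.peek(), Some(&"page 0"));
    assert_eq!(reader.peek(), Some(&"page 0")); // still not consumed

    // "has_next": actually advance, i.e. load the page, because more
    // records are needed and decoding has to continue into it.
    assert_eq!(reader.next(), Some("page 0"));
    assert_eq!(reader.peek(), Some(&"page 1"));
}
```

In the PR itself, read_records uses peek_next to compute end_of_column before counting records, and only falls back to has_next in the loop-continuation check, where the next page genuinely has to be loaded.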
42 changes: 41 additions & 1 deletion parquet/src/column/reader.rs
@@ -306,7 +306,7 @@ where

// If dictionary, we must read it
if metadata.is_dict {
self.read_new_page()?;
self.read_dictionary_page()?;
continue;
}

@@ -362,6 +362,24 @@
Ok(num_records - remaining)
}

/// Read the next page as a dictionary page. If the next page is not a dictionary page,
/// this will return an error.
fn read_dictionary_page(&mut self) -> Result<()> {
match self.page_reader.get_next_page()? {
Some(Page::DictionaryPage {
buf,
num_values,
encoding,
is_sorted,
}) => self
.values_decoder
.set_dict(buf, num_values, encoding, is_sorted),
_ => Err(ParquetError::General(
"Invalid page. Expecting dictionary page".to_string(),
)),
}
}

/// Reads a new page and set up the decoders for levels, values or dictionary.
/// Returns false if there's no page left.
fn read_new_page(&mut self) -> Result<bool> {
@@ -493,6 +511,28 @@
}
}

/// Check whether there is more data to read from this column.
/// If the current page is fully decoded, this will NOT load the next page
/// into the buffer.
#[inline]
pub(crate) fn peek_next(&mut self) -> Result<bool> {
if self.num_buffered_values == 0
|| self.num_buffered_values == self.num_decoded_values
{
// TODO: should we return false if read_new_page() = true and
// num_buffered_values = 0?
match self.page_reader.peek_next_page()? {
Some(next_page) => Ok(next_page.num_rows != 0),
None => Ok(false),
}
} else {
Ok(true)
}
}

/// Check whether there is more data to read from this column.
/// If the current page is fully decoded, this will load the next page
/// (if it exists) into the buffer.
#[inline]
pub(crate) fn has_next(&mut self) -> Result<bool> {
if self.num_buffered_values == 0