Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Simplify file struct abstractions #1120

Merged
merged 2 commits into from
Oct 17, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions ballista/rust/core/proto/ballista.proto
Original file line number Diff line number Diff line change
Expand Up @@ -596,7 +596,7 @@ message FilterExecNode {
PhysicalExprNode expr = 2;
}

message FilePartition {
message FileGroup {
repeated PartitionedFile files = 1;
}

Expand All @@ -606,7 +606,7 @@ message ScanLimit {
}

message ParquetScanExecNode {
repeated FilePartition partitions = 1;
repeated FileGroup file_groups = 1;
Schema schema = 2;
uint32 batch_size = 4;
repeated uint32 projection = 6;
Expand Down
43 changes: 22 additions & 21 deletions ballista/rust/core/src/serde/physical_plan/from_proto.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ use datafusion::catalog::catalog::{
};
use datafusion::datasource::object_store::local::LocalFileSystem;
use datafusion::datasource::object_store::{FileMeta, ObjectStoreRegistry, SizedFile};
use datafusion::datasource::{FilePartition, PartitionedFile};
use datafusion::datasource::PartitionedFile;
use datafusion::execution::context::{
ExecutionConfig, ExecutionContextState, ExecutionProps,
};
Expand Down Expand Up @@ -127,8 +127,8 @@ impl TryInto<Arc<dyn ExecutionPlan>> for &protobuf::PhysicalPlanNode {
Arc::new(LocalFileSystem {}),
scan.files
.iter()
.map(|f| f.try_into())
.collect::<Result<Vec<PartitionedFile>, _>>()?,
.map(|f| f.into())
.collect::<Vec<PartitionedFile>>(),
statistics,
schema,
scan.has_header,
Expand All @@ -145,13 +145,10 @@ impl TryInto<Arc<dyn ExecutionPlan>> for &protobuf::PhysicalPlanNode {

Ok(Arc::new(ParquetExec::new(
Arc::new(LocalFileSystem {}),
scan.partitions
scan.file_groups
.iter()
.map(|p| {
let it = p.files.iter().map(|f| f.try_into());
it.collect::<Result<Vec<PartitionedFile>, _>>()
})
.collect::<Result<Vec<Vec<PartitionedFile>>, _>>()?,
.map(|p| p.into())
.collect::<Vec<Vec<PartitionedFile>>>(),
statistics,
schema,
Some(projection),
Expand All @@ -170,8 +167,8 @@ impl TryInto<Arc<dyn ExecutionPlan>> for &protobuf::PhysicalPlanNode {
Arc::new(LocalFileSystem {}),
scan.files
.iter()
.map(|f| f.try_into())
.collect::<Result<Vec<PartitionedFile>, _>>()?,
.map(|f| f.into())
.collect::<Vec<PartitionedFile>>(),
statistics,
schema,
Some(projection),
Expand Down Expand Up @@ -741,23 +738,27 @@ pub fn parse_protobuf_hash_partitioning(
}
}

impl TryInto<PartitionedFile> for &protobuf::PartitionedFile {
type Error = BallistaError;

fn try_into(self) -> Result<PartitionedFile, Self::Error> {
Ok(PartitionedFile {
impl From<&protobuf::PartitionedFile> for PartitionedFile {
fn from(val: &protobuf::PartitionedFile) -> Self {
PartitionedFile {
file_meta: FileMeta {
sized_file: SizedFile {
path: self.path.clone(),
size: self.size,
path: val.path.clone(),
size: val.size,
},
last_modified: if self.last_modified_ns == 0 {
last_modified: if val.last_modified_ns == 0 {
None
} else {
Some(Utc.timestamp_nanos(self.last_modified_ns as i64))
Some(Utc.timestamp_nanos(val.last_modified_ns as i64))
},
},
})
}
}
}

/// Deserialize a protobuf `FileGroup` into the list of partitioned files it contains.
impl From<&protobuf::FileGroup> for Vec<PartitionedFile> {
    fn from(val: &protobuf::FileGroup) -> Self {
        // Pre-size the output; each protobuf file converts 1:1.
        let mut files = Vec::with_capacity(val.files.len());
        for file in &val.files {
            files.push(file.into());
        }
        files
    }
}

Expand Down
20 changes: 13 additions & 7 deletions ballista/rust/core/src/serde/physical_plan/to_proto.rs
Original file line number Diff line number Diff line change
Expand Up @@ -275,18 +275,16 @@ impl TryInto<protobuf::PhysicalPlanNode> for Arc<dyn ExecutionPlan> {
)),
})
} else if let Some(exec) = plan.downcast_ref::<ParquetExec>() {
let partitions = exec
.partitions()
.into_iter()
.map(|p| protobuf::FilePartition {
files: p.iter().map(|f| f.into()).collect(),
})
let file_groups = exec
.file_groups()
.iter()
.map(|p| p.as_slice().into())
.collect();

Ok(protobuf::PhysicalPlanNode {
physical_plan_type: Some(PhysicalPlanType::ParquetScan(
protobuf::ParquetScanExecNode {
partitions,
file_groups,
statistics: Some((&exec.statistics()).into()),
limit: exec
.limit()
Expand Down Expand Up @@ -688,6 +686,14 @@ impl From<&PartitionedFile> for protobuf::PartitionedFile {
}
}

/// Serialize a group of partitioned files into its protobuf representation.
impl From<&[PartitionedFile]> for protobuf::FileGroup {
    fn from(gr: &[PartitionedFile]) -> protobuf::FileGroup {
        // Convert each file and collect into the generated message type.
        let mut files = Vec::with_capacity(gr.len());
        for file in gr {
            files.push(file.into());
        }
        protobuf::FileGroup { files }
    }
}

impl From<&ColumnStatistics> for protobuf::ColumnStats {
fn from(cs: &ColumnStatistics) -> protobuf::ColumnStats {
protobuf::ColumnStats {
Expand Down
3 changes: 3 additions & 0 deletions datafusion/src/datasource/datasource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,9 @@ pub trait TableProvider: Sync + Send {
}

/// Create an ExecutionPlan that will scan the table.
/// The table provider will usually be responsible for grouping
/// the source data into partitions that can be efficiently
/// parallelized or distributed.
async fn scan(
&self,
projection: &Option<Vec<usize>>,
Expand Down
16 changes: 0 additions & 16 deletions datafusion/src/datasource/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,22 +155,6 @@ impl std::fmt::Display for PartitionedFile {
}
}

#[derive(Debug, Clone)]
/// A collection of files that should be read in a single task,
/// i.e. one unit of parallelism in the scan
pub struct FilePartition {
    /// The index of this partition among all partitions of the scan
    pub index: usize,
    /// The files contained in this partition; `Display` renders them
    /// as a comma-separated list
    pub files: Vec<PartitionedFile>,
}

impl std::fmt::Display for FilePartition {
    /// Render the partition as a comma-separated list of its files.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        for (i, file) in self.files.iter().enumerate() {
            // Separator goes between items only, never before the first.
            if i > 0 {
                write!(f, ", ")?;
            }
            write!(f, "{}", file)?;
        }
        Ok(())
    }
}

fn create_max_min_accs(
schema: &Schema,
) -> (Vec<Option<MaxAccumulator>>, Vec<Option<MinAccumulator>>) {
Expand Down
23 changes: 23 additions & 0 deletions datafusion/src/physical_plan/file_format/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,26 @@ pub use self::parquet::ParquetExec;
pub use avro::AvroExec;
pub use csv::CsvExec;
pub use json::NdJsonExec;

use crate::datasource::PartitionedFile;
use std::fmt::{Display, Formatter, Result};

/// A wrapper to customize partitioned file display
#[derive(Debug)]
struct FileGroupsDisplay<'a>(&'a [Vec<PartitionedFile>]);

impl<'a> Display for FileGroupsDisplay<'a> {
    /// Write all file paths, across every group, as one bracketed
    /// comma-separated list (groups are not visually distinguished).
    fn fmt(&self, f: &mut Formatter) -> Result {
        write!(f, "[")?;
        let mut first = true;
        for group in self.0 {
            for file in group {
                if !first {
                    write!(f, ", ")?;
                }
                write!(f, "{}", file.file_meta.path())?;
                first = false;
            }
            // An empty group still contributes an empty entry between
            // separators, matching the join-based formatting.
            if group.is_empty() {
                if !first {
                    write!(f, ", ")?;
                }
                first = false;
            }
        }
        write!(f, "]")
    }
}
81 changes: 16 additions & 65 deletions datafusion/src/physical_plan/file_format/parquet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ use std::{any::Any, convert::TryInto};

use crate::datasource::file_format::parquet::ChunkObjectReader;
use crate::datasource::object_store::ObjectStore;
use crate::datasource::PartitionedFile;
use crate::{
error::{DataFusionError, Result},
logical_plan::{Column, Expr},
Expand Down Expand Up @@ -59,14 +60,13 @@ use tokio::{

use async_trait::async_trait;

use crate::datasource::{FilePartition, PartitionedFile};

/// Execution plan for scanning one or more Parquet partitions
#[derive(Debug, Clone)]
pub struct ParquetExec {
object_store: Arc<dyn ObjectStore>,
/// Parquet partitions to read
partitions: Vec<ParquetPartition>,
/// Grouped list of files. Each group will be processed together by one
/// partition of the `ExecutionPlan`.
file_groups: Vec<Vec<PartitionedFile>>,
/// Schema after projection is applied
schema: SchemaRef,
/// Projection for which columns to load
Expand All @@ -83,23 +83,6 @@ pub struct ParquetExec {
limit: Option<usize>,
}

/// Represents one partition of a Parquet data set and this currently means one Parquet file.
///
/// In the future it would be good to support subsets of files based on ranges of row groups
/// so that we can better parallelize reads of large files across available cores (see
/// [ARROW-10995](https://issues.apache.org/jira/browse/ARROW-10995)).
///
/// We may also want to support reading Parquet files that are partitioned based on a key and
/// in this case we would want this partition struct to represent multiple files for a given
/// partition key (see [ARROW-11019](https://issues.apache.org/jira/browse/ARROW-11019)).
#[derive(Debug, Clone)]
pub struct ParquetPartition {
Comment on lines -86 to -96
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These comments were mostly outdated and the other features mentioned are now planned in the ListingTable provider

/// The Parquet filename for this partition
pub file_partition: FilePartition,
/// Execution metrics
metrics: ExecutionPlanMetricsSet,
}

/// Stores metrics about the parquet execution for a particular parquet file
#[derive(Debug, Clone)]
struct ParquetFileMetrics {
Expand All @@ -115,24 +98,16 @@ impl ParquetExec {
#[allow(clippy::too_many_arguments)]
pub fn new(
object_store: Arc<dyn ObjectStore>,
files: Vec<Vec<PartitionedFile>>,
file_groups: Vec<Vec<PartitionedFile>>,
statistics: Statistics,
schema: SchemaRef,
projection: Option<Vec<usize>>,
predicate: Option<Expr>,
batch_size: usize,
limit: Option<usize>,
) -> Self {
debug!("Creating ParquetExec, desc: {:?}, projection {:?}, predicate: {:?}, limit: {:?}",
files, projection, predicate, limit);

let metrics = ExecutionPlanMetricsSet::new();

let partitions = files
.into_iter()
.enumerate()
.map(|(i, f)| ParquetPartition::new(f, i, metrics.clone()))
.collect::<Vec<_>>();
debug!("Creating ParquetExec, files: {:?}, projection {:?}, predicate: {:?}, limit: {:?}",
file_groups, projection, predicate, limit);

let metrics = ExecutionPlanMetricsSet::new();
let predicate_creation_errors =
Expand Down Expand Up @@ -162,7 +137,7 @@ impl ParquetExec {

Self {
object_store,
partitions,
file_groups,
schema: projected_schema,
projection,
metrics,
Expand Down Expand Up @@ -204,11 +179,8 @@ impl ParquetExec {
}

/// List of data files
pub fn partitions(&self) -> Vec<&[PartitionedFile]> {
self.partitions
.iter()
.map(|fp| fp.file_partition.files.as_slice())
.collect()
pub fn file_groups(&self) -> &[Vec<PartitionedFile>] {
Copy link
Contributor Author

@rdettai rdettai Oct 15, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We replace partitions with file_groups to try to decrease the overuse of the term "partition", which represents different (yet similar 😅) things in different contexts:

  • on the listing table side, a partition refers to a "hive partition", that is to say, a set of files grouped into a folder because they share a common attribute
  • on the execution plan side, a partition is a unit of parallelism. Files are grouped together to provide a good workload for one thread/executor.

&self.file_groups
}
/// Optional projection for which columns to load
pub fn projection(&self) -> &[usize] {
Expand All @@ -225,20 +197,6 @@ impl ParquetExec {
}
}

impl ParquetPartition {
/// Create a new parquet partition
pub fn new(
files: Vec<PartitionedFile>,
index: usize,
metrics: ExecutionPlanMetricsSet,
) -> Self {
Self {
file_partition: FilePartition { index, files },
metrics,
}
}
}

impl ParquetFileMetrics {
/// Create new metrics
pub fn new(
Expand Down Expand Up @@ -279,7 +237,7 @@ impl ExecutionPlan for ParquetExec {

/// Get the output partitioning of this plan
fn output_partitioning(&self) -> Partitioning {
Partitioning::UnknownPartitioning(self.partitions.len())
Partitioning::UnknownPartitioning(self.file_groups.len())
}

fn with_new_children(
Expand All @@ -304,7 +262,7 @@ impl ExecutionPlan for ParquetExec {
Receiver<ArrowResult<RecordBatch>>,
) = channel(2);

let partition = self.partitions[partition_index].clone();
let partition = self.file_groups[partition_index].clone();
let metrics = self.metrics.clone();
let projection = self.projection.clone();
let predicate_builder = self.predicate_builder.clone();
Expand Down Expand Up @@ -338,18 +296,12 @@ impl ExecutionPlan for ParquetExec {
) -> std::fmt::Result {
match t {
DisplayFormatType::Default => {
let files: Vec<_> = self
.partitions
.iter()
.map(|pp| format!("{}", pp.file_partition))
.collect();

write!(
f,
"ParquetExec: batch_size={}, limit={:?}, partitions=[{}]",
"ParquetExec: batch_size={}, limit={:?}, partitions={}",
self.batch_size,
self.limit,
files.join(", ")
super::FileGroupsDisplay(&self.file_groups)
)
}
}
Expand Down Expand Up @@ -493,7 +445,7 @@ fn build_row_group_predicate(
fn read_partition(
object_store: &dyn ObjectStore,
partition_index: usize,
partition: ParquetPartition,
partition: Vec<PartitionedFile>,
metrics: ExecutionPlanMetricsSet,
projection: &[usize],
predicate_builder: &Option<PruningPredicate>,
Expand All @@ -502,8 +454,7 @@ fn read_partition(
limit: Option<usize>,
) -> Result<()> {
let mut total_rows = 0;
let all_files = partition.file_partition.files;
'outer: for partitioned_file in all_files {
'outer: for partitioned_file in partition {
let file_metrics = ParquetFileMetrics::new(
partition_index,
&*partitioned_file.file_meta.path(),
Expand Down