This repository has been archived by the owner on Oct 2, 2024. It is now read-only.

PyArrow Dataset registration for raysql #49

Closed · wants to merge 4 commits
4 changes: 2 additions & 2 deletions raysql/context.py
@@ -203,8 +203,8 @@ def register_csv(self, table_name: str, path: str, has_header: bool):
     def register_parquet(self, table_name: str, path: str):
         self.ctx.register_parquet(table_name, path)

-    def register_data_lake(self, table_name: str, paths: List[str]):
-        self.ctx.register_datalake_table(table_name, paths)
+    def register_dataset(self, table_name: str, dataset: pa.Dataset):
+        self.ctx.register_dataset(table_name, dataset)

     def sql(self, sql: str) -> pa.RecordBatch:
         # TODO we should parse sql and inspect the plan rather than
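For orientation, a minimal end-to-end sketch of the new Python API (the pa.Dataset annotation above presumably refers to pyarrow.dataset.Dataset). The RaySqlContext import path and constructor argument below are assumptions based on the surrounding file; only register_dataset and sql appear in this diff.

import ray
import pyarrow.dataset as ds
from raysql.context import RaySqlContext  # assumed import path

ray.init()
ctx = RaySqlContext(2)  # hypothetical constructor argument (worker count)

# Any pyarrow.dataset.Dataset can be registered, e.g. a hive-partitioned
# parquet directory (hypothetical path):
dataset = ds.dataset("/data/tips/", format="parquet", partitioning="hive")
ctx.register_dataset("tips", dataset)

batch = ctx.sql("SELECT sex, AVG(tip) AS avg_tip FROM tips GROUP BY sex")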
16 changes: 10 additions & 6 deletions src/context.rs
@@ -1,10 +1,12 @@
+use crate::dataset::Dataset;
 use crate::planner::{make_execution_graph, PyExecutionGraph};
 use crate::shuffle::{RayShuffleReaderExec, ShuffleCodec};
 use crate::utils::wait_for_future;
 use datafusion::arrow::pyarrow::FromPyArrow;
 use datafusion::arrow::pyarrow::ToPyArrow;
 use datafusion::arrow::record_batch::RecordBatch;
 use datafusion::config::Extensions;
+use datafusion::datasource::TableProvider;
 use datafusion::error::{DataFusionError, Result};
 use datafusion::execution::context::TaskContext;
 use datafusion::execution::disk_manager::DiskManagerConfig;
@@ -75,12 +77,14 @@ impl PyContext {
         Ok(())
     }

-    pub fn register_datalake_table(&self, name: &str, path: Vec<&str>, py: Python) -> PyResult<()> {
-        // let options = ParquetReadOptions::default();
-        // let listing_options = options.to_listing_options(&self.ctx.state().config());
-        // wait_for_future(py, self.ctx.register_listing_table(name, path, listing_options, None, None))?;
-        // Ok(())
-        unimplemented!()
+    fn register_dataset(&self, name: &str, dataset: &PyAny, py: Python) -> PyResult<()> {
+        let table: Arc<dyn TableProvider> = Arc::new(Dataset::new(dataset, py)?);
+
+        self.ctx
+            .register_table(name, table)
+            .map_err(DataFusionError::from)?;
+
+        Ok(())
     }

     /// Execute SQL directly against the DataFusion context. Useful for statements
126 changes: 126 additions & 0 deletions src/dataset.rs
@@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Implements a DataFusion TableProvider that delegates to a PyArrow Dataset.
// This allows us to use PyArrow Datasets as DataFusion tables while pushing
// down projections and filters.
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::PyType;

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;

use datafusion::arrow::datatypes::SchemaRef;
use datafusion::arrow::pyarrow::PyArrowType;
use datafusion::datasource::{TableProvider, TableType};
use datafusion::error::{DataFusionError, Result as DFResult};
use datafusion::execution::context::SessionState;
use datafusion::logical_expr::TableProviderFilterPushDown;
use datafusion::physical_plan::ExecutionPlan;
use datafusion_expr::Expr;

use crate::dataset_exec::DatasetExec;
use crate::pyarrow_filter_expression::PyArrowFilterExpression;

// Wraps a pyarrow.dataset.Dataset and implements a DataFusion TableProvider around it
#[derive(Debug, Clone)]
pub(crate) struct Dataset {
    dataset: PyObject,
}

impl Dataset {
    // Creates a new table provider wrapping an existing pyarrow.dataset.Dataset instance
    pub fn new(dataset: &PyAny, py: Python) -> PyResult<Self> {
        // Ensure that we were passed an instance of pyarrow.dataset.Dataset
        let ds = PyModule::import(py, "pyarrow.dataset")?;
        let ds_type: &PyType = ds.getattr("Dataset")?.downcast()?;
        if dataset.is_instance(ds_type)? {
            Ok(Dataset {
                dataset: dataset.into(),
            })
        } else {
            Err(PyValueError::new_err(
                "dataset argument must be a pyarrow.dataset.Dataset object",
            ))
        }
    }
}
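Viewed from Python, the guard in Dataset::new corresponds to an isinstance check against pyarrow.dataset.Dataset. A small sketch (the local path is hypothetical):

import pyarrow as pa
import pyarrow.dataset as ds

dataset = ds.dataset("/data/tips/", format="parquet")  # hypothetical path
isinstance(dataset, ds.Dataset)  # True: ds.dataset() returns a FileSystemDataset,
                                 # which subclasses pyarrow.dataset.Dataset

# Anything else fails the downcast guard with the error defined above:
# ctx.register_dataset("t", pa.table({"a": [1]}))
#   -> ValueError: dataset argument must be a pyarrow.dataset.Dataset object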

#[async_trait]
impl TableProvider for Dataset {
    /// Returns the table provider as [`Any`](std::any::Any) so that it can be
    /// downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Get a reference to the schema for this table
    fn schema(&self) -> SchemaRef {
        Python::with_gil(|py| {
            let dataset = self.dataset.as_ref(py);
            // These unwraps can panic, but since we checked in the constructor that
            // self.dataset is a pyarrow.dataset.Dataset, they should never fail in practice
            Arc::new(
                dataset
                    .getattr("schema")
                    .unwrap()
                    .extract::<PyArrowType<_>>()
                    .unwrap()
                    .0,
            )
        })
    }
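On the Python side, the attribute read above is just dataset.schema; PyArrowType then converts the resulting pyarrow.Schema into a Rust SchemaRef via Arrow's C data interface. For reference (hypothetical path):

import pyarrow.dataset as ds

dataset = ds.dataset("/data/tips/", format="parquet")  # hypothetical path
print(dataset.schema)  # the pyarrow.Schema that schema() extracts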

    /// Get the type of this table for metadata/catalog purposes.
    fn table_type(&self) -> TableType {
        TableType::Base
    }

    /// Create an ExecutionPlan that will scan the table.
    /// The table provider is usually responsible for grouping
    /// the source data into partitions that can be efficiently
    /// parallelized or distributed.
    async fn scan(
        &self,
        _ctx: &SessionState,
        projection: Option<&Vec<usize>>,
        filters: &[Expr],
        // limit can be used to reduce the amount scanned
        // from the datasource as a performance optimization.
        // If set, it contains the number of rows needed by the `LogicalPlan`;
        // the datasource should return *at least* this number of rows, if available.
        _limit: Option<usize>,
    ) -> DFResult<Arc<dyn ExecutionPlan>> {
        Python::with_gil(|py| {
            let plan: Arc<dyn ExecutionPlan> = Arc::new(
                DatasetExec::new(py, self.dataset.as_ref(py), projection.cloned(), filters)
                    .map_err(|err| DataFusionError::External(Box::new(err)))?,
            );
            Ok(plan)
        })
    }
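DatasetExec itself is not part of this excerpt, but conceptually the pushed-down projection and filters end up driving a pyarrow scanner, roughly like this (a sketch, not the PR's code; column names and path are illustrative):

import pyarrow.dataset as ds

dataset = ds.dataset("/data/tips/", format="parquet")  # hypothetical path
scanner = dataset.scanner(
    columns=["sex", "tip"],        # the projection, resolved to column names
    filter=ds.field("tip") > 1.0,  # the translated filter expression
)
for batch in scanner.to_batches():  # streams pyarrow.RecordBatch values
    ...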

    /// Tests whether the table provider can make use of a filter expression
    /// to optimise data retrieval.
    fn supports_filter_pushdown(&self, filter: &Expr) -> DFResult<TableProviderFilterPushDown> {
        match PyArrowFilterExpression::try_from(filter) {
            Ok(_) => Ok(TableProviderFilterPushDown::Exact),
            _ => Ok(TableProviderFilterPushDown::Unsupported),
        }
    }
}
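Note the contract here: returning Exact promises DataFusion that the provider applies the filter completely, so the planner adds no residual FilterExec above the scan; any predicate that PyArrowFilterExpression cannot translate falls back to Unsupported and is evaluated by DataFusion itself. Which predicates translate is decided in pyarrow_filter_expression.rs (not shown in this excerpt); the split below is an assumption for illustration:

# Simple comparisons and conjunctions likely translate -> Exact (assumption):
#   SELECT * FROM tips WHERE tip > 1.0 AND sex = 'Female'
# Arithmetic between columns may not translate -> Unsupported,
# so DataFusion filters the batches itself (assumption):
#   SELECT * FROM tips WHERE tip * 2.0 > total_bill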
1 change: 1 addition & 0 deletions src/lib.rs
@@ -11,6 +11,7 @@ pub mod planner;
 pub mod query_stage;
 pub mod shuffle;
 pub mod utils;
+pub mod dataset;

 /// A Python module implemented in Rust.
 #[pymodule]