Handle TimestampType separately when converting to pandas' dtype. #798

Merged (2 commits) on Sep 19, 2019
10 changes: 3 additions & 7 deletions databricks/koalas/base.py
@@ -26,12 +26,11 @@
 from pandas.api.types import is_list_like
 from pyspark import sql as spark
 from pyspark.sql import functions as F, Window
-from pyspark.sql.types import DoubleType, FloatType, LongType, StringType, TimestampType, \
-    to_arrow_type
+from pyspark.sql.types import DoubleType, FloatType, LongType, StringType, TimestampType

 from databricks import koalas as ks  # For running doctests and reference resolution in PyCharm.
 from databricks.koalas.internal import _InternalFrame
-from databricks.koalas.typedef import pandas_wraps
+from databricks.koalas.typedef import pandas_wraps, spark_type_to_pandas_dtype
 from databricks.koalas.utils import align_diff_series, scol_for


@@ -219,10 +218,7 @@ def dtype(self):
         >>> s.rename("a").to_frame().set_index("a").index.dtype
         dtype('<M8[ns]')
         """
-        if type(self.spark_type) == TimestampType:
-            return np.dtype('datetime64[ns]')
-        else:
-            return np.dtype(to_arrow_type(self.spark_type).to_pandas_dtype())
+        return spark_type_to_pandas_dtype(self.spark_type)

     @property
     def empty(self):
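The two branches that previously lived in this property now sit behind the shared helper. A quick, hedged equivalence check of the refactor (a sketch assuming pyspark, pyarrow, and koalas are importable; not part of the PR itself):

import numpy as np
from pyspark.sql.types import LongType, TimestampType, to_arrow_type
from databricks.koalas.typedef import spark_type_to_pandas_dtype

# Non-timestamp types still take the arrow conversion path, so the
# result is unchanged for them.
assert spark_type_to_pandas_dtype(LongType()) == \
    np.dtype(to_arrow_type(LongType()).to_pandas_dtype())
# TimestampType is short-circuited straight to nanosecond datetime64.
assert spark_type_to_pandas_dtype(TimestampType()) == np.dtype('datetime64[ns]')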
4 changes: 2 additions & 2 deletions databricks/koalas/internal.py
@@ -32,7 +32,7 @@

 from databricks import koalas as ks  # For running doctests and reference resolution in PyCharm.
 from databricks.koalas.config import get_option
-from databricks.koalas.typedef import infer_pd_series_spark_type
+from databricks.koalas.typedef import infer_pd_series_spark_type, spark_type_to_pandas_dtype
 from databricks.koalas.utils import column_index_level, default_session, lazy_property, scol_for

@@ -634,7 +634,7 @@ def pandas_df(self):
         sdf = self.spark_internal_df
         pdf = sdf.toPandas()
         if len(pdf) == 0 and len(sdf.schema) > 0:
-            pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
+            pdf = pdf.astype({field.name: spark_type_to_pandas_dtype(field.dataType)
                               for field in sdf.schema})

         index_columns = self.index_columns
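This is the code path the fix targets: toPandas() on an empty result gives pandas nothing to infer dtypes from, and the schema-driven astype restores the intended ones. A minimal standalone sketch of the same pattern (assumes a local Spark session; not part of the PR):

import numpy as np
from pyspark.sql import SparkSession
from pyspark.sql.types import StructField, StructType, TimestampType
from databricks.koalas.typedef import spark_type_to_pandas_dtype

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([], StructType([StructField("t", TimestampType())]))

pdf = sdf.toPandas()  # empty, so pandas cannot infer column dtypes
pdf = pdf.astype({field.name: spark_type_to_pandas_dtype(field.dataType)
                  for field in sdf.schema})
assert pdf["t"].dtype == np.dtype("datetime64[ns]")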
9 changes: 9 additions & 0 deletions databricks/koalas/tests/test_dataframe.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 #

+from datetime import date, datetime
 import inspect

 import numpy as np
@@ -1642,3 +1643,11 @@ def test_transform(self):

         with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
             kdf.transform(1)
+
+    def test_empty_timestamp(self):
+        pdf = pd.DataFrame({'t': [datetime(2019, 1, 1, 0, 0, 0),
+                                  datetime(2019, 1, 2, 0, 0, 0),
+                                  datetime(2019, 1, 3, 0, 0, 0)]})
+        kdf = ks.from_pandas(pdf)
+        self.assert_eq(kdf[kdf['t'] != kdf['t']], pdf[pdf['t'] != pdf['t']])
+        self.assert_eq(kdf[kdf['t'] != kdf['t']].dtypes, pdf[pdf['t'] != pdf['t']].dtypes)
Collaborator Author:
Actually this test doesn't reproduce the error in the description since pyarrow is properly initialized while running other tests.
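Given that note, a hedged way to exercise the same path outside the suite is a fresh Python process, where no earlier test has touched pyarrow. A sketch assuming a local Spark session; the filter matches no rows, so conversion goes through the empty-DataFrame path:

from datetime import datetime
import pandas as pd
import databricks.koalas as ks

pdf = pd.DataFrame({'t': [datetime(2019, 1, 1)]})
kdf = ks.from_pandas(pdf)
empty = kdf[kdf['t'] != kdf['t']]  # no row satisfies t != t
print(empty.dtypes)                # expected: t    datetime64[ns]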

8 changes: 8 additions & 0 deletions databricks/koalas/typedef.py
@@ -145,6 +145,14 @@ def as_spark_type(tpe) -> types.DataType:
     return _known_types.get(tpe, None)


+def spark_type_to_pandas_dtype(spark_type):
+    """ Return the given Spark DataType to pandas dtype. """
+    if isinstance(spark_type, types.TimestampType):
+        return np.dtype('datetime64[ns]')
+    else:
+        return np.dtype(types.to_arrow_type(spark_type).to_pandas_dtype())
+
+
 def as_python_type(spark_tpe):
     return _py_conversions.get(spark_tpe, None)
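The new helper centralizes the Spark-to-pandas dtype mapping. A few hedged examples of what it should return (assuming pyspark and pyarrow are importable; the non-timestamp results come from pyarrow's to_pandas_dtype, not from this PR):

import numpy as np
from pyspark.sql.types import DoubleType, StringType, TimestampType
from databricks.koalas.typedef import spark_type_to_pandas_dtype

# TimestampType takes the dedicated branch; the rest go through arrow.
assert spark_type_to_pandas_dtype(TimestampType()) == np.dtype('datetime64[ns]')
assert spark_type_to_pandas_dtype(DoubleType()) == np.dtype('float64')
assert spark_type_to_pandas_dtype(StringType()) == np.dtype('object')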