ARROW-6674: [Python] Fix or ignore the test warnings
jorisvandenbossche committed Sep 24, 2019
1 parent 61637dd commit 2a2bb14
Showing 5 changed files with 16 additions and 24 deletions.
18 changes: 5 additions & 13 deletions python/pyarrow/tests/conftest.py
@@ -51,7 +51,9 @@
     'plasma',
     's3',
     'tensorflow',
-    'flight'
+    'flight',
+    'slow',
+    'requires_testing_data',
 ]


@@ -70,6 +72,8 @@
     's3': False,
     'tensorflow': False,
     'flight': False,
+    'slow': False,
+    'requires_testing_data': True,
 }

 try:
@@ -166,18 +170,6 @@ def bool_env(name, default=None):
                          action='store_true', default=default,
                          help=('Run only the {} test group'.format(group)))

-    parser.addoption('--runslow', action='store_true',
-                     default=False, help='run slow tests')
-
-
-def pytest_collection_modifyitems(config, items):
-    if not config.getoption('--runslow'):
-        skip_slow = pytest.mark.skip(reason='need --runslow option to run')
-
-        for item in items:
-            if 'slow' in item.keywords:
-                item.add_marker(skip_slow)
-

 def pytest_runtest_setup(item):
     only_set = False
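
Note on the conftest.py change: the ad-hoc --runslow option and the
pytest_collection_modifyitems hook are retired; 'slow' (and the new
'requires_testing_data') are handled by the same group/defaults machinery as
'flight', 's3', and the other groups, so pytest_runtest_setup decides whether
to skip them. Below is a minimal, self-contained sketch of how such
group-based skipping can look in a conftest.py; the flag spellings and hook
body are illustrative assumptions, not the exact Arrow implementation.

    # Hypothetical minimal conftest.py: skip tests by marker-based groups.
    import pytest

    groups = ['slow', 'flight']                   # illustrative subset
    defaults = {'slow': False, 'flight': False}   # disabled unless enabled

    def pytest_addoption(parser):
        for group in groups:
            parser.addoption('--enable-{}'.format(group), action='store_true',
                             default=defaults[group],
                             help='Enable the {} test group'.format(group))

    def pytest_runtest_setup(item):
        # Skip any test carrying a group marker whose flag was not passed.
        for group in groups:
            if group in item.keywords and \
                    not item.config.getoption('--enable-{}'.format(group)):
                pytest.skip('{} tests not enabled'.format(group))

With this, a test marked @pytest.mark.slow only runs when the corresponding
flag is given, which is the behaviour the removed --runslow code provided.
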
12 changes: 6 additions & 6 deletions python/pyarrow/tests/test_array.py
@@ -152,25 +152,25 @@ def test_to_pandas_zero_copy():
     arr = pa.array(range(10))

     for i in range(10):
-        np_arr = arr.to_pandas()
-        assert sys.getrefcount(np_arr) == 2
-        np_arr = None  # noqa
+        series = arr.to_pandas()
+        assert sys.getrefcount(series) == 2
+        series = None  # noqa

     assert sys.getrefcount(arr) == 2

     for i in range(10):
         arr = pa.array(range(10))
-        np_arr = arr.to_pandas()
+        series = arr.to_pandas()
         arr = None
         gc.collect()

         # Ensure base is still valid

         # Because of py.test's assert inspection magic, if you put getrefcount
         # on the line being examined, it will be 1 higher than you expect
-        base_refcount = sys.getrefcount(np_arr.base)
+        base_refcount = sys.getrefcount(series.values.base)
         assert base_refcount == 2
-        np_arr.sum()
+        series.sum()


 @pytest.mark.nopandas
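
Note on the test_array.py change: Array.to_pandas returns a pandas Series
rather than a raw NumPy array, so the zero-copy refcount checks now go through
series.values.base (the object keeping the Arrow memory alive) instead of
np_arr.base, and the variable is renamed to match. A small sketch of that
relationship, assuming a primitive array without nulls so the conversion is
zero-copy:

    # Sketch: the objects behind a zero-copy Array.to_pandas() conversion.
    import sys

    import pyarrow as pa

    arr = pa.array(range(10))    # int64, no nulls -> zero-copy conversion
    series = arr.to_pandas()     # pandas.Series wrapping the Arrow memory
    values = series.values      # the underlying NumPy ndarray view
    print(type(series).__name__, type(values).__name__)
    print(sys.getrefcount(values.base))  # base keeps the Arrow data alive
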
2 changes: 1 addition & 1 deletion python/pyarrow/tests/test_io.py
@@ -1191,7 +1191,7 @@ def test_compressed_recordbatch_stream(compression):
         else:
             raise
     writer = pa.RecordBatchStreamWriter(stream, table.schema)
-    writer.write_table(table, chunksize=3)
+    writer.write_table(table, max_chunksize=3)
     writer.close()
     stream.close()  # Flush data
     buf = raw.getvalue()
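
Note on the test_io.py change: the keyword for splitting a table into record
batches is max_chunksize; the older chunksize spelling was producing a
deprecation warning in the test run. A minimal round trip using the renamed
keyword (the buffer setup here is an illustrative sketch, not the
compressed-stream test itself):

    # Sketch: write a table as record batches of at most 3 rows each.
    import pyarrow as pa

    table = pa.Table.from_pydict({'x': list(range(10))})
    sink = pa.BufferOutputStream()
    writer = pa.RecordBatchStreamWriter(sink, table.schema)
    writer.write_table(table, max_chunksize=3)   # renamed from chunksize
    writer.close()

    result = pa.ipc.open_stream(sink.getvalue()).read_all()
    assert result.equals(table)
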
6 changes: 2 additions & 4 deletions python/pyarrow/tests/test_parquet.py
@@ -2352,9 +2352,7 @@ def test_noncoerced_nanoseconds_written_without_exception(tempdir):
     # nanosecond timestamps by default
     n = 9
     df = pd.DataFrame({'x': range(n)},
-                      index=pd.DatetimeIndex(start='2017-01-01',
-                                             freq='1n',
-                                             periods=n))
+                      index=pd.date_range('2017-01-01', freq='1n', periods=n))
     tb = pa.Table.from_pandas(df)

     filename = tempdir / 'written.parquet'
@@ -3025,7 +3023,7 @@ def test_write_nested_zero_length_array_chunk_failure():
     # Each column is a ChunkedArray with 2 elements
     my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                  for batch in data]
-    my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))
+    my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols))
                   for batch in my_arrays]
     tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
     _check_roundtrip(tbl)
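
Note on the test_parquet.py changes: both edits silence deprecation warnings.
pandas deprecated constructing a DatetimeIndex directly from
start/freq/periods in favour of pd.date_range, and pyarrow's
RecordBatch.from_arrays takes the schema as a keyword argument (the second
positional slot is the list of column names). A short sketch of the
non-deprecated spellings with placeholder data:

    # Sketch: the spellings the updated tests use.
    import pandas as pd

    import pyarrow as pa

    # date_range replaces DatetimeIndex(start=..., freq=..., periods=...)
    idx = pd.date_range('2017-01-01', freq='1n', periods=9)  # nanosecond steps

    # pass the schema by keyword; positionally that slot means `names`
    batch = pa.RecordBatch.from_arrays(
        [pa.array([1, 2, 3])],
        schema=pa.schema([('x', pa.int64())]))
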
2 changes: 2 additions & 0 deletions python/pyarrow/tests/test_serialization.py
@@ -520,6 +520,8 @@ def deserializer(data):
     assert np.alltrue(new_x.view(np.ndarray) == np.zeros(3))


+@pytest.mark.filterwarnings(
+    "ignore:the matrix subclass:PendingDeprecationWarning")
 def test_numpy_matrix_serialization(tmpdir):
     class CustomType(object):
         def __init__(self, val):
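
Note on the test_serialization.py change: np.matrix emits a
PendingDeprecationWarning ("the matrix subclass is not the recommended
way..."), and the marker tells pytest to ignore that specific warning for
this one test only. The marker takes a warnings-filter string of the form
action:message:category. A generic sketch of the pattern (the test body is
illustrative only):

    # Sketch: scope a warning filter to a single test with a marker.
    import numpy as np
    import pytest

    @pytest.mark.filterwarnings(
        "ignore:the matrix subclass:PendingDeprecationWarning")
    def test_uses_numpy_matrix():
        m = np.matrix([[1, 2], [3, 4]])   # would warn without the marker
        assert m.shape == (2, 2)
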
