# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New
import os
import warnings
from cython import sizeof
cdef _sequence_to_array(object sequence, object mask, object size,
DataType type, CMemoryPool* pool, c_bool from_pandas):
cdef:
int64_t c_size
PyConversionOptions options
shared_ptr[CChunkedArray] chunked
if type is not None:
options.type = type.sp_type
if size is not None:
options.size = size
options.from_pandas = from_pandas
options.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False)
with nogil:
chunked = GetResultValue(
ConvertPySequence(sequence, mask, options, pool)
)
if chunked.get().num_chunks() == 1:
return pyarrow_wrap_array(chunked.get().chunk(0))
else:
return pyarrow_wrap_chunked_array(chunked)
cdef inline _is_array_like(obj):
if np is None:
return False
if isinstance(obj, np.ndarray):
return True
return pandas_api._have_pandas_internal() and pandas_api.is_array_like(obj)
def _ndarray_to_arrow_type(object values, DataType type):
return pyarrow_wrap_data_type(_ndarray_to_type(values, type))
cdef shared_ptr[CDataType] _ndarray_to_type(object values,
DataType type) except *:
cdef shared_ptr[CDataType] c_type
dtype = values.dtype
if type is None and dtype != object:
c_type = GetResultValue(NumPyDtypeToArrow(dtype))
if type is not None:
c_type = type.sp_type
return c_type
cdef _ndarray_to_array(object values, object mask, DataType type,
c_bool from_pandas, c_bool safe, CMemoryPool* pool):
cdef:
shared_ptr[CChunkedArray] chunked_out
shared_ptr[CDataType] c_type = _ndarray_to_type(values, type)
CCastOptions cast_options = CCastOptions(safe)
with nogil:
check_status(NdarrayToArrow(pool, values, mask, from_pandas,
c_type, cast_options, &chunked_out))
if chunked_out.get().num_chunks() > 1:
return pyarrow_wrap_chunked_array(chunked_out)
else:
return pyarrow_wrap_array(chunked_out.get().chunk(0))
cdef _codes_to_indices(object codes, object mask, DataType type,
MemoryPool memory_pool):
"""
Convert the codes of a pandas Categorical to indices for a pyarrow
DictionaryArray, taking into account missing values + mask
"""
if mask is None:
mask = codes == -1
else:
mask = mask | (codes == -1)
return array(codes, mask=mask, type=type, memory_pool=memory_pool)
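# Illustrative sketch: for a pandas Categorical such as
# pd.Categorical(["a", None, "a"]), `codes` is [0, -1, 0], so the indices
# built above become [0, null, 0], ready to pair with the dictionary array.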
def _handle_arrow_array_protocol(obj, type, mask, size):
if mask is not None or size is not None:
raise ValueError(
"Cannot specify a mask or a size when passing an object that is "
"converted with the __arrow_array__ protocol.")
res = obj.__arrow_array__(type=type)
if not isinstance(res, (Array, ChunkedArray)):
raise TypeError("The object's __arrow_array__ method does not "
"return a pyarrow Array or ChunkedArray.")
if isinstance(res, ChunkedArray) and res.num_chunks == 1:
res = res.chunk(0)
if type is not None and res.type != type:
res = res.cast(type)
return res
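# Illustrative sketch of the protocol handled above: a minimal container
# implementing ``__arrow_array__`` (class name and storage are hypothetical).
#
#     import pyarrow as pa
#
#     class MyColumn:
#         def __init__(self, values):
#             self._values = values
#
#         def __arrow_array__(self, type=None):
#             # must return a pyarrow Array or ChunkedArray
#             return pa.array(self._values, type=type)
#
#     pa.array(MyColumn([1, 2, 3]))   # dispatches here -> Int64Array [1, 2, 3]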
def array(object obj, type=None, mask=None, size=None, from_pandas=None,
bint safe=True, MemoryPool memory_pool=None):
"""
Create pyarrow.Array instance from a Python object.
Parameters
----------
obj : sequence, iterable, ndarray, pandas.Series, Arrow-compatible array
If both type and size are specified, may be a single-use iterable. If
not strongly-typed, the Arrow type will be inferred for the resulting array.
Any Arrow-compatible array that implements the Arrow PyCapsule Protocol
(has an ``__arrow_c_array__`` or ``__arrow_c_device_array__`` method)
can be passed as well.
type : pyarrow.DataType
Explicit type to attempt to coerce to, otherwise will be inferred from
the data.
mask : array[bool], optional
Indicate which values are null (True) or not null (False).
size : int64, optional
Size of the elements. If the input is larger than size, conversion
stops at this length. For iterators, if size is larger than the input
iterator, this will be treated as a "max size", but will involve an
initial allocation of size followed by a resize to the actual size (so
if you know the exact size, specifying it correctly will give you
better performance).
from_pandas : bool, default None
Use pandas's semantics for inferring nulls from values in
ndarray-like data. If passed, the mask takes precedence, but
if a value is unmasked (not-null), but still null according to
pandas semantics, then it is null. Defaults to False if not
passed explicitly by user, or True if a pandas object is
passed in.
safe : bool, default True
Check for overflows or other unsafe conversions.
memory_pool : pyarrow.MemoryPool, optional
If not passed, will allocate memory from the currently-set default
memory pool.
Returns
-------
array : pyarrow.Array or pyarrow.ChunkedArray
A ChunkedArray instead of an Array is returned if:
- the object data overflowed binary storage.
- the object's ``__arrow_array__`` protocol method returned a chunked
array.
Notes
-----
The timezone will be preserved in the returned array for timezone-aware
data; naive timestamps produce an array without a timezone.
Internally, UTC values are stored for timezone-aware data with the
timezone set in the data type.
Pandas's DateOffsets and dateutil.relativedelta.relativedelta are by
default converted as MonthDayNanoIntervalArray. relativedelta leapdays
are ignored as are all absolute fields on both objects. datetime.timedelta
can also be converted to MonthDayNanoIntervalArray but this requires
passing MonthDayNanoIntervalType explicitly.
Converting to dictionary array will promote to a wider integer type for
indices if the number of distinct values cannot be represented, even if
the index type was explicitly set. This means that if there are more than
127 values the returned dictionary array's index type will be at least
pa.int16() even if pa.int8() was passed to the function. Note that an
explicit index type will not be demoted even if it is wider than required.
Examples
--------
>>> import pandas as pd
>>> import pyarrow as pa
>>> pa.array(pd.Series([1, 2]))
<pyarrow.lib.Int64Array object at ...>
[
1,
2
]
>>> pa.array(["a", "b", "a"], type=pa.dictionary(pa.int8(), pa.string()))
<pyarrow.lib.DictionaryArray object at ...>
...
-- dictionary:
[
"a",
"b"
]
-- indices:
[
0,
1,
0
]
>>> import numpy as np
>>> pa.array(pd.Series([1, 2]), mask=np.array([0, 1], dtype=bool))
<pyarrow.lib.Int64Array object at ...>
[
1,
null
]
>>> arr = pa.array(range(1024), type=pa.dictionary(pa.int8(), pa.int64()))
>>> arr.type.index_type
DataType(int16)
"""
cdef:
CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
bint is_pandas_object = False
bint c_from_pandas
type = ensure_type(type, allow_none=True)
extension_type = None
if type is not None and type.id == _Type_EXTENSION:
extension_type = type
type = type.storage_type
if from_pandas is None:
c_from_pandas = False
else:
c_from_pandas = from_pandas
if isinstance(obj, Array):
if type is not None and not obj.type.equals(type):
obj = obj.cast(type, safe=safe, memory_pool=memory_pool)
return obj
if hasattr(obj, '__arrow_array__'):
return _handle_arrow_array_protocol(obj, type, mask, size)
elif hasattr(obj, '__arrow_c_device_array__'):
if type is not None:
requested_type = type.__arrow_c_schema__()
else:
requested_type = None
schema_capsule, array_capsule = obj.__arrow_c_device_array__(requested_type)
out_array = Array._import_from_c_device_capsule(schema_capsule, array_capsule)
if type is not None and out_array.type != type:
# PyCapsule interface type coercion is best effort, so we need to
# check the type of the returned array and cast if necessary
out_array = out_array.cast(type, safe=safe, memory_pool=memory_pool)
return out_array
elif hasattr(obj, '__arrow_c_array__'):
if type is not None:
requested_type = type.__arrow_c_schema__()
else:
requested_type = None
schema_capsule, array_capsule = obj.__arrow_c_array__(requested_type)
out_array = Array._import_from_c_capsule(schema_capsule, array_capsule)
if type is not None and out_array.type != type:
# PyCapsule interface type coercion is best effort, so we need to
# check the type of the returned array and cast if necessary
out_array = out_array.cast(type, safe=safe, memory_pool=memory_pool)
return out_array
elif _is_array_like(obj):
if mask is not None:
if _is_array_like(mask):
mask = get_values(mask, &is_pandas_object)
else:
raise TypeError("Mask must be a numpy array "
"when converting numpy arrays")
values = get_values(obj, &is_pandas_object)
if is_pandas_object and from_pandas is None:
c_from_pandas = True
if isinstance(values, np.ma.MaskedArray):
if mask is not None:
raise ValueError("Cannot pass a numpy masked array and "
"specify a mask at the same time")
else:
# don't use shrunken masks
mask = None if values.mask is np.ma.nomask else values.mask
values = values.data
if mask is not None:
if mask.dtype != np.bool_:
raise TypeError("Mask must be boolean dtype")
if mask.ndim != 1:
raise ValueError("Mask must be 1D array")
if len(values) != len(mask):
raise ValueError(
"Mask is a different length from sequence being converted")
if hasattr(values, '__arrow_array__'):
return _handle_arrow_array_protocol(values, type, mask, size)
elif (pandas_api.is_categorical(values) and
type is not None and type.id != Type_DICTIONARY):
result = _ndarray_to_array(
np.asarray(values), mask, type, c_from_pandas, safe, pool
)
elif pandas_api.is_categorical(values):
if type is not None:
index_type = type.index_type
value_type = type.value_type
if values.ordered != type.ordered:
raise ValueError(
"The 'ordered' flag of the passed categorical values "
"does not match the 'ordered' of the specified type. ")
else:
index_type = None
value_type = None
indices = _codes_to_indices(
values.codes, mask, index_type, memory_pool)
try:
dictionary = array(
values.categories.values, type=value_type,
memory_pool=memory_pool)
except TypeError:
# TODO when removing the deprecation warning, this whole
# try/except can be removed (to bubble the TypeError of
# the first array(..) call)
if value_type is not None:
warnings.warn(
"The dtype of the 'categories' of the passed "
"categorical values ({0}) does not match the "
"specified type ({1}). For now ignoring the specified "
"type, but in the future this mismatch will raise a "
"TypeError".format(
values.categories.dtype, value_type),
FutureWarning, stacklevel=2)
dictionary = array(
values.categories.values, memory_pool=memory_pool)
else:
raise
return DictionaryArray.from_arrays(
indices, dictionary, ordered=values.ordered, safe=safe)
else:
if pandas_api.have_pandas:
values, type = pandas_api.compat.get_datetimetz_type(
values, obj.dtype, type)
if type and type.id == _Type_RUN_END_ENCODED:
arr = _ndarray_to_array(
values, mask, type.value_type, c_from_pandas, safe, pool)
result = _pc().run_end_encode(arr, run_end_type=type.run_end_type,
memory_pool=memory_pool)
else:
result = _ndarray_to_array(values, mask, type, c_from_pandas, safe,
pool)
else:
if type and type.id == _Type_RUN_END_ENCODED:
arr = _sequence_to_array(
obj, mask, size, type.value_type, pool, from_pandas)
result = _pc().run_end_encode(arr, run_end_type=type.run_end_type,
memory_pool=memory_pool)
# ConvertPySequence does strict conversion if type is explicitly passed
else:
result = _sequence_to_array(obj, mask, size, type, pool, c_from_pandas)
if extension_type is not None:
result = ExtensionArray.from_storage(extension_type, result)
return result
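# Illustrative sketch of the run-end-encoded branch above (assumes a pyarrow
# build that exposes ``pa.run_end_encoded``): values are first converted with
# ``type.value_type`` and then run-end encoded via compute.
#
#     import pyarrow as pa
#
#     ree_type = pa.run_end_encoded(pa.int32(), pa.int64())
#     pa.array([1, 1, 1, 2, 2], type=ree_type)   # RunEndEncodedArray with
#                                                # run_ends [3, 5], values [1, 2]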
def asarray(values, type=None):
"""
Convert to pyarrow.Array, inferring type if not provided.
Parameters
----------
values : array-like
This can be a sequence, numpy.ndarray, pyarrow.Array or
pyarrow.ChunkedArray. If a ChunkedArray is passed, the output will be
a ChunkedArray, otherwise the output will be an Array.
type : string or DataType
Explicitly construct the array with this type. Attempt to cast if
indicated type is different.
Returns
-------
arr : Array or ChunkedArray
"""
if isinstance(values, (Array, ChunkedArray)):
if type is not None and not values.type.equals(type):
values = values.cast(type)
return values
else:
return array(values, type=type)
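# Illustrative usage sketch:
#
#     import pyarrow as pa
#
#     pa.asarray([1, 2, 3])                     # builds a new Int64Array
#     pa.asarray(pa.array([1, 2, 3]))           # passes the Array through as-is
#     pa.asarray(pa.chunked_array([[1], [2]]))  # stays a ChunkedArray
#     pa.asarray([1, 2, 3], type=pa.float64())  # constructs with the given type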
def nulls(size, type=None, MemoryPool memory_pool=None):
"""
Create a strongly-typed Array instance with all elements null.
Parameters
----------
size : int
Array length.
type : pyarrow.DataType, default None
Explicit type for the array. By default use NullType.
memory_pool : MemoryPool, default None
Arrow MemoryPool to use for allocations. Uses the default memory
pool if not passed.
Returns
-------
arr : Array
Examples
--------
>>> import pyarrow as pa
>>> pa.nulls(10)
<pyarrow.lib.NullArray object at ...>
10 nulls
>>> pa.nulls(3, pa.uint32())
<pyarrow.lib.UInt32Array object at ...>
[
null,
null,
null
]
"""
cdef:
CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
int64_t length = size
shared_ptr[CDataType] ty
shared_ptr[CArray] arr
type = ensure_type(type, allow_none=True)
if type is None:
type = null()
ty = pyarrow_unwrap_data_type(type)
with nogil:
arr = GetResultValue(MakeArrayOfNull(ty, length, pool))
return pyarrow_wrap_array(arr)
def repeat(value, size, MemoryPool memory_pool=None):
"""
Create an Array instance whose slots are the given scalar.
Parameters
----------
value : Scalar-like object
Either a pyarrow.Scalar or any python object coercible to a Scalar.
size : int
Number of times to repeat the scalar in the output Array.
memory_pool : MemoryPool, default None
Arrow MemoryPool to use for allocations. Uses the default memory
pool if not passed.
Returns
-------
arr : Array
Examples
--------
>>> import pyarrow as pa
>>> pa.repeat(10, 3)
<pyarrow.lib.Int64Array object at ...>
[
10,
10,
10
]
>>> pa.repeat([1, 2], 2)
<pyarrow.lib.ListArray object at ...>
[
[
1,
2
],
[
1,
2
]
]
>>> pa.repeat("string", 3)
<pyarrow.lib.StringArray object at ...>
[
"string",
"string",
"string"
]
>>> pa.repeat(pa.scalar({'a': 1, 'b': [1, 2]}), 2)
<pyarrow.lib.StructArray object at ...>
-- is_valid: all not null
-- child 0 type: int64
[
1,
1
]
-- child 1 type: list<item: int64>
[
[
1,
2
],
[
1,
2
]
]
"""
cdef:
CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
int64_t length = size
shared_ptr[CArray] c_array
shared_ptr[CScalar] c_scalar
if not isinstance(value, Scalar):
value = scalar(value, memory_pool=memory_pool)
c_scalar = (<Scalar> value).unwrap()
with nogil:
c_array = GetResultValue(
MakeArrayFromScalar(deref(c_scalar), length, pool)
)
return pyarrow_wrap_array(c_array)
def infer_type(values, mask=None, from_pandas=False):
"""
Attempt to infer the Arrow data type that can hold the passed Python
sequence in an Array object.
Parameters
----------
values : array-like
Sequence to infer type from.
mask : ndarray (bool type), optional
Optional exclusion mask where True marks null, False non-null.
from_pandas : bool, default False
Use pandas's NA/null sentinel values for type inference.
Returns
-------
type : DataType
"""
cdef:
shared_ptr[CDataType] out
c_bool use_pandas_sentinels = from_pandas
if mask is not None and not isinstance(mask, np.ndarray):
mask = np.array(mask, dtype=bool)
out = GetResultValue(InferArrowType(values, mask, use_pandas_sentinels))
return pyarrow_wrap_data_type(out)
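# Illustrative usage sketch:
#
#     import pyarrow as pa
#
#     pa.infer_type([1, 2, None])   # DataType(int64)
#     pa.infer_type(["a", None])    # DataType(string)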
def _normalize_slice(object arrow_obj, slice key):
"""
Slices with step not equal to 1 (or None) will produce a copy
rather than a zero-copy view
"""
cdef:
Py_ssize_t start, stop, step
Py_ssize_t n = len(arrow_obj)
start, stop, step = key.indices(n)
if step != 1:
indices = np.arange(start, stop, step)
return arrow_obj.take(indices)
else:
length = max(stop - start, 0)
return arrow_obj.slice(start, length)
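# Illustrative sketch of the slicing behaviour implemented above:
#
#     import pyarrow as pa
#
#     arr = pa.array([0, 1, 2, 3, 4])
#     arr[1:4]    # step == 1: zero-copy slice  -> [1, 2, 3]
#     arr[::2]    # step != 1: arr.take([0, 2, 4]), materializes a copy
#     arr[::-1]   # negative step also copies   -> [4, 3, 2, 1, 0]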
cdef Py_ssize_t _normalize_index(Py_ssize_t index,
Py_ssize_t length) except -1:
if index < 0:
index += length
if index < 0:
raise IndexError("index out of bounds")
elif index >= length:
raise IndexError("index out of bounds")
return index
cdef wrap_datum(const CDatum& datum):
if datum.kind() == DatumType_ARRAY:
return pyarrow_wrap_array(MakeArray(datum.array()))
elif datum.kind() == DatumType_CHUNKED_ARRAY:
return pyarrow_wrap_chunked_array(datum.chunked_array())
elif datum.kind() == DatumType_RECORD_BATCH:
return pyarrow_wrap_batch(datum.record_batch())
elif datum.kind() == DatumType_TABLE:
return pyarrow_wrap_table(datum.table())
elif datum.kind() == DatumType_SCALAR:
return pyarrow_wrap_scalar(datum.scalar())
else:
raise ValueError("Unable to wrap Datum in a Python object")
cdef _append_array_buffers(const CArrayData* ad, list res):
"""
Recursively append Buffer wrappers from *ad* and its children.
"""
cdef size_t i, n
assert ad != NULL
n = ad.buffers.size()
for i in range(n):
buf = ad.buffers[i]
res.append(pyarrow_wrap_buffer(buf)
if buf.get() != NULL else None)
n = ad.child_data.size()
for i in range(n):
_append_array_buffers(ad.child_data[i].get(), res)
cdef _reduce_array_data(const CArrayData* ad):
"""
Recursively dissect ArrayData into (picklable) tuples.
"""
cdef size_t i, n
assert ad != NULL
n = ad.buffers.size()
buffers = []
for i in range(n):
buf = ad.buffers[i]
buffers.append(pyarrow_wrap_buffer(buf)
if buf.get() != NULL else None)
children = []
n = ad.child_data.size()
for i in range(n):
children.append(_reduce_array_data(ad.child_data[i].get()))
if ad.dictionary.get() != NULL:
dictionary = _reduce_array_data(ad.dictionary.get())
else:
dictionary = None
return pyarrow_wrap_data_type(ad.type), ad.length, ad.null_count, \
ad.offset, buffers, children, dictionary
cdef shared_ptr[CArrayData] _reconstruct_array_data(data):
"""
Reconstruct CArrayData objects from the tuple structure generated
by _reduce_array_data.
"""
cdef:
int64_t length, null_count, offset, i
DataType dtype
Buffer buf
vector[shared_ptr[CBuffer]] c_buffers
vector[shared_ptr[CArrayData]] c_children
shared_ptr[CArrayData] c_dictionary
dtype, length, null_count, offset, buffers, children, dictionary = data
for i in range(len(buffers)):
buf = buffers[i]
if buf is None:
c_buffers.push_back(shared_ptr[CBuffer]())
else:
c_buffers.push_back(buf.buffer)
for i in range(len(children)):
c_children.push_back(_reconstruct_array_data(children[i]))
if dictionary is not None:
c_dictionary = _reconstruct_array_data(dictionary)
return CArrayData.MakeWithChildrenAndDictionary(
dtype.sp_type,
length,
c_buffers,
c_children,
c_dictionary,
null_count,
offset)
def _restore_array(data):
"""
Reconstruct an Array from pickled ArrayData.
"""
cdef shared_ptr[CArrayData] ad = _reconstruct_array_data(data)
return pyarrow_wrap_array(MakeArray(ad))
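# Illustrative sketch: the reducers above are what let arrays round-trip
# through pickle.
#
#     import pickle
#     import pyarrow as pa
#
#     arr = pa.array([1, None, 3])
#     restored = pickle.loads(pickle.dumps(arr))
#     assert restored.equals(arr)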
cdef class _PandasConvertible(_Weakrefable):
def to_pandas(
self,
memory_pool=None,
categories=None,
bint strings_to_categorical=False,
bint zero_copy_only=False,
bint integer_object_nulls=False,
bint date_as_object=True,
bint timestamp_as_object=False,
bint use_threads=True,
bint deduplicate_objects=True,
bint ignore_metadata=False,
bint safe=True,
bint split_blocks=False,
bint self_destruct=False,
str maps_as_pydicts=None,
types_mapper=None,
bint coerce_temporal_nanoseconds=False
):
"""
Convert to a pandas-compatible NumPy array or DataFrame, as appropriate
Parameters
----------
memory_pool : MemoryPool, default None
Arrow MemoryPool to use for allocations. Uses the default memory
pool if not passed.
categories : list, default empty
List of fields that should be returned as pandas.Categorical. Only
applies to table-like data structures.
strings_to_categorical : bool, default False
Encode string (UTF8) and binary types to pandas.Categorical.
zero_copy_only : bool, default False
Raise an ArrowException if this function call would require copying
the underlying data.
integer_object_nulls : bool, default False
Cast integers with nulls to objects
date_as_object : bool, default True
Cast dates to objects. If False, convert to datetime64 dtype with
the equivalent time unit (if supported). Note: in pandas version
< 2.0, only datetime64[ns] conversion is supported.
timestamp_as_object : bool, default False
Cast non-nanosecond timestamps (np.datetime64) to objects. This is
useful in pandas version 1.x if you have timestamps that don't fit
in the normal date range of nanosecond timestamps (1678 CE-2262 CE).
Non-nanosecond timestamps are supported in pandas version 2.0.
If False, all timestamps are converted to datetime64 dtype.
use_threads : bool, default True
Whether to parallelize the conversion using multiple threads.
deduplicate_objects : bool, default True
Do not create multiple copies of equal Python objects during
conversion, to save on memory use. Conversion will be slower.
ignore_metadata : bool, default False
If True, do not use the 'pandas' metadata to reconstruct the
DataFrame index, if present
safe : bool, default True
For certain data types, a cast is needed in order to store the
data in a pandas DataFrame or Series (e.g. timestamps are always
stored as nanoseconds in pandas). This option controls whether it
is a safe cast or not.
split_blocks : bool, default False
If True, generate one internal "block" for each column when
creating a pandas.DataFrame from a RecordBatch or Table. While this
can temporarily reduce memory, note that various pandas operations
can trigger "consolidation" which may balloon memory use.
self_destruct : bool, default False
EXPERIMENTAL: If True, attempt to deallocate the originating Arrow
memory while converting the Arrow object to pandas. If you use the
object after calling to_pandas with this option it will crash your
program.
Note that you may not always see memory usage improvements. For
example, if multiple columns share an underlying allocation,
memory can't be freed until all columns are converted.
maps_as_pydicts : str, optional, default `None`
Valid values are `None`, 'lossy', or 'strict'.
The default behavior (`None`) is to convert Arrow Map arrays to
Python association lists (list-of-tuples) in the same order as the
Arrow Map, as in [(key1, value1), (key2, value2), ...].
If 'lossy' or 'strict', convert Arrow Map arrays to native Python dicts.
This can change the ordering of (key, value) pairs, and will
deduplicate multiple keys, resulting in a possible loss of data.
If 'lossy', this key deduplication results in a warning printed
when detected. If 'strict', this instead results in an exception
being raised when detected.
types_mapper : function, default None
A function mapping a pyarrow DataType to a pandas ExtensionDtype.
This can be used to override the default pandas type for conversion
of built-in pyarrow types or in absence of pandas_metadata in the
Table schema. The function receives a pyarrow DataType and is
expected to return a pandas ExtensionDtype or ``None`` if the
default conversion should be used for that type. If you have
a dictionary mapping, you can pass ``dict.get`` as function.
coerce_temporal_nanoseconds : bool, default False
Only applicable to pandas version >= 2.0.
A legacy option to coerce date32, date64, duration, and timestamp
time units to nanoseconds when converting to pandas. This is the
default behavior in pandas version 1.x. Set this option to True if
you'd like to use this coercion when using pandas version >= 2.0
for backwards compatibility (not recommended otherwise).
Returns
-------
pandas.Series or pandas.DataFrame depending on type of object
Examples
--------
>>> import pyarrow as pa
>>> import pandas as pd
Convert a Table to pandas DataFrame:
>>> table = pa.table([
... pa.array([2, 4, 5, 100]),
... pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
... ], names=['n_legs', 'animals'])
>>> table.to_pandas()
n_legs animals
0 2 Flamingo
1 4 Horse
2 5 Brittle stars
3 100 Centipede
>>> isinstance(table.to_pandas(), pd.DataFrame)
True
Convert a RecordBatch to pandas DataFrame:
>>> import pyarrow as pa
>>> n_legs = pa.array([2, 4, 5, 100])
>>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
>>> batch = pa.record_batch([n_legs, animals],
... names=["n_legs", "animals"])
>>> batch
pyarrow.RecordBatch
n_legs: int64
animals: string
----
n_legs: [2,4,5,100]
animals: ["Flamingo","Horse","Brittle stars","Centipede"]
>>> batch.to_pandas()
n_legs animals
0 2 Flamingo
1 4 Horse
2 5 Brittle stars
3 100 Centipede
>>> isinstance(batch.to_pandas(), pd.DataFrame)
True
Convert a Chunked Array to pandas Series:
>>> import pyarrow as pa
>>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]])
>>> n_legs.to_pandas()
0 2
1 2
2 4
3 4
4 5
5 100
dtype: int64
>>> isinstance(n_legs.to_pandas(), pd.Series)
True
"""
options = dict(
pool=memory_pool,
strings_to_categorical=strings_to_categorical,
zero_copy_only=zero_copy_only,
integer_object_nulls=integer_object_nulls,
date_as_object=date_as_object,
timestamp_as_object=timestamp_as_object,
use_threads=use_threads,
deduplicate_objects=deduplicate_objects,
safe=safe,
split_blocks=split_blocks,
self_destruct=self_destruct,
maps_as_pydicts=maps_as_pydicts,
coerce_temporal_nanoseconds=coerce_temporal_nanoseconds
)
return self._to_pandas(options, categories=categories,
ignore_metadata=ignore_metadata,
types_mapper=types_mapper)
cdef PandasOptions _convert_pandas_options(dict options):
cdef PandasOptions result
result.pool = maybe_unbox_memory_pool(options['pool'])
result.strings_to_categorical = options['strings_to_categorical']
result.zero_copy_only = options['zero_copy_only']
result.integer_object_nulls = options['integer_object_nulls']
result.date_as_object = options['date_as_object']
result.timestamp_as_object = options['timestamp_as_object']
result.use_threads = options['use_threads']
result.deduplicate_objects = options['deduplicate_objects']
result.safe_cast = options['safe']
result.split_blocks = options['split_blocks']
result.self_destruct = options['self_destruct']
result.coerce_temporal_nanoseconds = options['coerce_temporal_nanoseconds']
result.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False)
maps_as_pydicts = options['maps_as_pydicts']
if maps_as_pydicts is None:
result.maps_as_pydicts = MapConversionType.DEFAULT
elif maps_as_pydicts == "lossy":
result.maps_as_pydicts = MapConversionType.LOSSY
elif maps_as_pydicts == "strict":
result.maps_as_pydicts = MapConversionType.STRICT_
else:
raise ValueError(
"Invalid value for 'maps_as_pydicts': "
+ "valid values are 'lossy', 'strict' or `None` (default). "
+ f"Received '{maps_as_pydicts}'."
)
return result
cdef class Array(_PandasConvertible):
"""
The base class for all Arrow arrays.
"""
def __init__(self):
raise TypeError("Do not call {}'s constructor directly, use one of "
"the `pyarrow.Array.from_*` functions instead."
.format(self.__class__.__name__))
cdef void init(self, const shared_ptr[CArray]& sp_array) except *:
self.sp_array = sp_array
self.ap = sp_array.get()
self.type = pyarrow_wrap_data_type(self.sp_array.get().type())
def _debug_print(self):
with nogil:
check_status(DebugPrint(deref(self.ap), 0))
def diff(self, Array other):
"""
Compare contents of this array against another one.
Return a string containing the result of diffing this array
(on the left side) against the other array (on the right side).
Parameters
----------
other : Array
The other array to compare this array with.
Returns
-------
diff : str
A human-readable printout of the differences.
Examples
--------
>>> import pyarrow as pa
>>> left = pa.array(["one", "two", "three"])
>>> right = pa.array(["two", None, "two-and-a-half", "three"])
>>> print(left.diff(right)) # doctest: +SKIP
@@ -0, +0 @@
-"one"
@@ -2, +1 @@
+null
+"two-and-a-half"
"""
self._assert_cpu()
cdef c_string result
with nogil:
result = self.ap.Diff(deref(other.ap))
return frombytes(result, safe=True)
def cast(self, object target_type=None, safe=None, options=None, memory_pool=None):
"""
Cast array values to another data type
See :func:`pyarrow.compute.cast` for usage.
Parameters
----------
target_type : DataType, default None
Type to cast array to.
safe : boolean, default True
Whether to check for conversion errors such as overflow.
options : CastOptions, default None
Additional checks passed via CastOptions.
memory_pool : MemoryPool, optional
memory pool to use for allocations during function execution.
Returns
-------