Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Replace np.sctypes for numpy 2.0 compat #1250

Merged
merged 51 commits into from
Nov 2, 2023
Merged
Show file tree
Hide file tree
Changes from 33 commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
2623523
replace np.sctypes with np.core.sctypes
mscheltienne Aug 28, 2023
d550152
rm unused imports
mscheltienne Aug 28, 2023
6e873c6
run blue instead of black
mscheltienne Aug 28, 2023
0f5ad6e
add author entries
mscheltienne Aug 28, 2023
1dac328
manually define the mapping between str and scalar types in casting.py
mscheltienne Aug 29, 2023
7966160
rm unused imports
mscheltienne Aug 29, 2023
07cea85
rm unused variable definition
mscheltienne Aug 29, 2023
d3d23db
fix blue
mscheltienne Aug 29, 2023
ac9e16f
fix missing import
mscheltienne Aug 29, 2023
6c30a84
try without using the sized aliases
mscheltienne Aug 29, 2023
3639711
Revert "try without using the sized aliases"
mscheltienne Aug 29, 2023
4cdba01
try with sized aliases again and np.longdouble instead of np.float128
mscheltienne Aug 29, 2023
2398cc3
use combination of getattr and hasattr, include float96 and complex19…
mscheltienne Aug 29, 2023
fcd8dd0
rm use of np.maximum_sctype
mscheltienne Sep 12, 2023
edd95db
rm unused import
mscheltienne Sep 12, 2023
53655ec
fix quotes for blue style
mscheltienne Sep 12, 2023
e97c992
fix np.sctypeDict calls
mscheltienne Sep 12, 2023
65106d9
better var name
mscheltienne Sep 12, 2023
363b403
rm unused imports
mscheltienne Sep 12, 2023
9fc2d34
Merge branch 'master' into np.sctypes
larsoner Oct 18, 2023
4815ee5
fix blue
mscheltienne Oct 18, 2023
49048c2
fix spelling
mscheltienne Oct 18, 2023
c76fe32
rm unused imports
mscheltienne Oct 18, 2023
079ddc8
try test fix suggested by larsoner
mscheltienne Oct 18, 2023
319f23f
try simpler
mscheltienne Oct 18, 2023
2c3b43d
FIX: Only need legacy if on 2.0
larsoner Oct 25, 2023
9214846
FIX: Cast
larsoner Oct 25, 2023
ae0e36e
FIX: Consistency
larsoner Oct 25, 2023
aca58c3
FIX: Newbyteorder
larsoner Oct 25, 2023
c60a234
fix typo
mscheltienne Oct 25, 2023
1a9ebad
fix more stuff
mscheltienne Oct 26, 2023
97e3aa9
more fix
mscheltienne Oct 26, 2023
a765af0
fix more stuff
mscheltienne Oct 26, 2023
df96ae3
FIX: check
larsoner Oct 26, 2023
83111ea
FIX: check
larsoner Oct 26, 2023
6ffea1b
FIX: Python types
larsoner Oct 26, 2023
86b0597
FIX: Preserve
larsoner Oct 26, 2023
fbbd801
FIX: Simplify
larsoner Oct 26, 2023
4630e0d
FIX: Maybe
larsoner Oct 26, 2023
49b1d41
FIX: Better
larsoner Oct 26, 2023
e61e7a0
Merge branch 'master' into np.sctypes
larsoner Oct 26, 2023
5eb5e54
FIX: Revert
larsoner Oct 26, 2023
a1ddae8
FIX: ComplexWarning
larsoner Oct 26, 2023
cd362aa
FIX: Context
larsoner Oct 26, 2023
c32b0d2
FIX: One more
larsoner Oct 26, 2023
d4596b7
FIX: Explicit
larsoner Oct 26, 2023
0f746c0
Apply suggestions from code review
larsoner Nov 1, 2023
e3c72e1
FIX: Style
larsoner Nov 1, 2023
e3a7495
STY: Flake
larsoner Nov 1, 2023
1bc593c
FIX: Test val equiv
larsoner Nov 1, 2023
a71eebf
FIX: Version
larsoner Nov 1, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .zenodo.json
Original file line number Diff line number Diff line change
Expand Up @@ -381,6 +381,11 @@
{
"name": "Suter, Peter"
},
{
"affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland",
"name": "Mathieu Scheltienne",
"orcid": "0000-0001-8316-7436"
}
],
"keywords": [
"neuroimaging"
Expand Down
2 changes: 1 addition & 1 deletion doc/source/gitwash/development_workflow.rst
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ Rewriting commit history

Do this only for your own feature branches.

There's an embarassing typo in a commit you made? Or perhaps the you
There's an embarrassing typo in a commit you made? Or perhaps the you
made several false starts you would like the posterity not to see.

This can be done via *interactive rebasing*.
Expand Down
1 change: 1 addition & 0 deletions doc/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ contributed code and discussion (in rough order of appearance):
* Jacob Roberts
* Horea Christian
* Fabian Perez
* Mathieu Scheltienne

License reprise
===============
Expand Down
53 changes: 46 additions & 7 deletions nibabel/casting.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,45 @@ class CastingError(Exception):
_test_val = 2**63 + 2**11 # Should be exactly representable in float64
TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val

# np.sctypes was removed in NumPy 2.0, and np.core.sctypes should not be used as a
# replacement, so define the mapping from type-class name to scalar types ourselves.
# The getattr/hasattr guards skip types that this platform's NumPy build does not
# provide (e.g. float96 vs float128 is platform-dependent).
sctypes = {
    'int': [
        getattr(np, dtype) for dtype in ('int8', 'int16', 'int32', 'int64') if hasattr(np, dtype)
    ],
    'uint': [
        getattr(np, dtype)
        for dtype in ('uint8', 'uint16', 'uint32', 'uint64')
        if hasattr(np, dtype)
    ],
    'float': [
        getattr(np, dtype)
        for dtype in ('float16', 'float32', 'float64', 'float96', 'float128')
        if hasattr(np, dtype)
    ],
    'complex': [
        getattr(np, dtype)
        for dtype in ('complex64', 'complex128', 'complex192', 'complex256')
        if hasattr(np, dtype)
    ],
    'others': [bool, object, bytes, str, np.void],
}
# Every scalar type (including sized and historical aliases) available in this NumPy
# build; replaces iteration over np.sctypeDict.values() when enumerating candidate
# dtypes.  Built as a set, so alias names resolving to the same type collapse.
# fmt: off
sctypes_aliases = {
    getattr(np, dtype)
    for dtype in (
        'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong',
        'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong',  # noqa: E501
        'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble',  # noqa: E501
        'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble',  # noqa: E501
        # other names of the built-in scalar types ('int_' already listed above)
        'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64',
        # other
        'object_', 'void',
    )
    if hasattr(np, dtype)
}
# fmt: on
larsoner marked this conversation as resolved.
Show resolved Hide resolved


def float_to_int(arr, int_type, nan2zero=True, infmax=False):
"""Convert floating point array `arr` to type `int_type`
Expand Down Expand Up @@ -252,7 +291,7 @@ def type_info(np_type):
return ret
info_64 = np.finfo(np.float64)
if dt.kind == 'c':
assert np_type is np.longcomplex
assert np_type is np.clongdouble
vals = (nmant, nexp, width / 2)
else:
assert np_type is np.longdouble
Expand Down Expand Up @@ -280,7 +319,7 @@ def type_info(np_type):
# Oh dear, we don't recognize the type information. Try some known types
# and then give up. At this stage we're expecting exotic longdouble or
# their complex equivalent.
if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
if np_type not in (np.longdouble, np.clongdouble) or width not in (16, 32):
raise FloatingError(f'We had not expected type {np_type}')
if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024):
# double pair on PPC. The _check_nmant routine does not work for this
Expand All @@ -290,13 +329,13 @@ def type_info(np_type):
# Got float64 despite everything
pass
elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384):
# binary 128, but with some busted type information. np.longcomplex
# binary 128, but with some busted type information. np.clongdouble
# seems to break here too, so we need to use np.longdouble and
# complexify
two = np.longdouble(2)
# See: https://matthew-brett.github.io/pydagogue/floating_point.html
max_val = (two**113 - 1) / (two**112) * two**16383
if np_type is np.longcomplex:
if np_type is np.clongdouble:
max_val += 0j
ret = dict(
min=-max_val,
Expand Down Expand Up @@ -714,7 +753,7 @@ def ok_floats():
Remove longdouble if it has no higher precision than float64
"""
# copy float list so we don't change the numpy global
floats = np.sctypes['float'][:]
floats = sctypes['float'][:]
if best_float() != np.longdouble and np.longdouble in floats:
floats.remove(np.longdouble)
return sorted(floats, key=lambda f: type_info(f)['nmant'])
Expand Down Expand Up @@ -750,10 +789,10 @@ def able_int_type(values):
mn = min(values)
mx = max(values)
if mn >= 0:
for ityp in np.sctypes['uint']:
for ityp in sctypes['uint']:
if mx <= np.iinfo(ityp).max:
return ityp
for ityp in np.sctypes['int']:
for ityp in sctypes['int']:
info = np.iinfo(ityp)
if mn >= info.min and mx <= info.max:
return ityp
Expand Down
7 changes: 7 additions & 0 deletions nibabel/conftest.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,12 @@
import numpy as np
import pytest

# Ignore warning requesting help with nicom
with pytest.warns(UserWarning):
import nibabel.nicom


def pytest_configure(config):
"""Configure pytest options."""
if int(np.__version__[0]) >= 2:
np.set_printoptions(legacy=125)
larsoner marked this conversation as resolved.
Show resolved Hide resolved
2 changes: 1 addition & 1 deletion nibabel/ecat.py
Original file line number Diff line number Diff line change
Expand Up @@ -923,7 +923,7 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None):
endianness = native_code

stream.seek(pos)
make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream)
make_array_writer(data.view(data.dtype.newbyteorder(endianness)), dtype).to_fileobj(stream)

def to_file_map(self, file_map=None):
"""Write ECAT7 image to `file_map` or contained ``self.file_map``
Expand Down
2 changes: 1 addition & 1 deletion nibabel/freesurfer/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def _fread3(fobj):
n : int
A 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, '>u1', 3)
b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(np.int64)
return (b1 << 16) + (b2 << 8) + b3


Expand Down
9 changes: 5 additions & 4 deletions nibabel/freesurfer/tests/test_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,12 @@
import struct
import time
import unittest
import warnings
from os.path import isdir
from os.path import join as pjoin

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from numpy.testing import assert_allclose

from ...fileslice import strided_scalar
from ...testing import clear_and_catch_warnings
Expand Down Expand Up @@ -112,8 +111,10 @@ def test_geometry():
assert np.array_equal(faces, faces2)

# Validate byte ordering
coords_swapped = coords.byteswap().newbyteorder()
faces_swapped = faces.byteswap().newbyteorder()
coords_swapped = coords.byteswap()
coords_swapped = coords_swapped.view(coords_swapped.dtype.newbyteorder())
faces_swapped = faces.byteswap()
faces_swapped = faces_swapped.view(faces_swapped.dtype.newbyteorder())
assert np.array_equal(coords_swapped, coords)
assert np.array_equal(faces_swapped, faces)

Expand Down
2 changes: 1 addition & 1 deletion nibabel/freesurfer/tests/test_mghformat.py
Original file line number Diff line number Diff line change
Expand Up @@ -345,7 +345,7 @@ def test_mghheader_default_structarr():
for endianness in (None,) + BIG_CODES:
hdr2 = MGHHeader.default_structarr(endianness=endianness)
assert hdr2 == hdr
assert hdr2.newbyteorder('>') == hdr
assert hdr2.view(hdr2.dtype.newbyteorder('>')) == hdr

for endianness in LITTLE_CODES:
with pytest.raises(ValueError):
Expand Down
10 changes: 7 additions & 3 deletions nibabel/nifti1.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@
if have_binary128():
# Only enable 128 bit floats if we really have IEEE binary 128 longdoubles
_float128t: type[np.generic] = np.longdouble
_complex256t: type[np.generic] = np.longcomplex
_complex256t: type[np.generic] = np.clongdouble

Check warning on line 92 in nibabel/nifti1.py

View check run for this annotation

Codecov / codecov/patch

nibabel/nifti1.py#L92

Added line #L92 was not covered by tests
else:
_float128t = np.void
_complex256t = np.void
Expand Down Expand Up @@ -2443,9 +2443,13 @@
return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32')

mn, mx = arr.min(), arr.max()
if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32):
if (isinstance(mn, int) and isinstance(mx, int)) or (
larsoner marked this conversation as resolved.
Show resolved Hide resolved
np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32)
):
return np.dtype('int32')
if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32):
if (isinstance(mn, float) and isinstance(mx, float)) or (
np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32)
):
return np.dtype('float32')

raise ValueError(
Expand Down
4 changes: 3 additions & 1 deletion nibabel/quaternions.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,9 @@

import numpy as np

MAX_FLOAT = np.maximum_sctype(float)
from .casting import sctypes

MAX_FLOAT = sctypes['float'][-1]
FLOAT_EPS = np.finfo(float).eps


Expand Down
3 changes: 2 additions & 1 deletion nibabel/spatialimages.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,7 @@
import numpy as np

from .arrayproxy import ArrayLike
from .casting import sctypes_aliases
from .dataobj_images import DataobjImage
from .filebasedimages import FileBasedHeader, FileBasedImage
from .fileholders import FileMap
Expand Down Expand Up @@ -333,7 +334,7 @@ def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]:
else:
raise e
supported = set()
for np_type in set(np.sctypeDict.values()):
for np_type in sctypes_aliases:
try:
obj.set_data_dtype(np_type)
except HeaderDataError:
Expand Down
2 changes: 1 addition & 1 deletion nibabel/streamlines/trk.py
Original file line number Diff line number Diff line change
Expand Up @@ -577,7 +577,7 @@ def _read_header(fileobj):
endianness = swapped_code

# Swap byte order
header_rec = header_rec.newbyteorder()
header_rec = header_rec.view(header_rec.dtype.newbyteorder())
if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
msg = (
f"Invalid hdr_size: {header_rec['hdr_size']} "
Expand Down
7 changes: 2 additions & 5 deletions nibabel/tests/test_analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,7 @@
from .. import imageglobals
from ..analyze import AnalyzeHeader, AnalyzeImage
from ..arraywriters import WriterError
from ..casting import as_int
from ..loadsave import read_img_data
from ..casting import as_int, sctypes_aliases
from ..nifti1 import Nifti1Header
from ..optpkg import optional_package
from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types
Expand All @@ -52,9 +51,7 @@
def add_duplicate_types(supported_np_types):
# Update supported numpy types with named scalar types that map to the same set of dtypes
dtypes = {np.dtype(t) for t in supported_np_types}
supported_np_types.update(
scalar for scalar in set(np.sctypeDict.values()) if np.dtype(scalar) in dtypes
)
supported_np_types.update(scalar for scalar in sctypes_aliases if np.dtype(scalar) in dtypes)


class TestAnalyzeHeader(tws._TestLabeledWrapStruct):
Expand Down
1 change: 0 additions & 1 deletion nibabel/tests/test_arrayproxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
import contextlib
import gzip
import pickle
import warnings
from io import BytesIO
from unittest import mock

Expand Down
13 changes: 7 additions & 6 deletions nibabel/tests/test_arraywriters.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,14 @@
get_slope_inter,
make_array_writer,
)
from ..casting import int_abs, on_powerpc, shared_range, type_info
from ..casting import int_abs, sctypes, shared_range, type_info
from ..testing import assert_allclose_safely, suppress_warnings
from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file

FLOAT_TYPES = np.sctypes['float']
COMPLEX_TYPES = np.sctypes['complex']
INT_TYPES = np.sctypes['int']
UINT_TYPES = np.sctypes['uint']
FLOAT_TYPES = sctypes['float']
COMPLEX_TYPES = sctypes['complex']
INT_TYPES = sctypes['int']
UINT_TYPES = sctypes['uint']
CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
IUINT_TYPES = INT_TYPES + UINT_TYPES
NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
Expand Down Expand Up @@ -61,7 +61,8 @@ def test_arraywriters():
assert aw.out_dtype == arr.dtype
assert_array_equal(arr, round_trip(aw))
# Byteswapped should be OK
bs_arr = arr.byteswap().newbyteorder('S')
bs_arr = arr.byteswap()
bs_arr = bs_arr.view(bs_arr.dtype.newbyteorder('S'))
bs_aw = klass(bs_arr)
bs_aw_rt = round_trip(bs_aw)
# assert against original array because POWER7 was running into
Expand Down
17 changes: 9 additions & 8 deletions nibabel/tests/test_casting.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_array_equal

from ..casting import (
CastingError,
Expand All @@ -17,15 +17,16 @@
int_abs,
int_to_float,
longdouble_precision_improved,
sctypes,
shared_range,
ulp,
)
from ..testing import suppress_warnings


def test_shared_range():
for ft in np.sctypes['float']:
for it in np.sctypes['int'] + np.sctypes['uint']:
for ft in sctypes['float']:
for it in sctypes['int'] + sctypes['uint']:
# Test that going a bit above or below the calculated min and max
# either generates the same number when cast, or the max int value
# (if this system generates that) or something smaller (because of
Expand Down Expand Up @@ -54,7 +55,7 @@ def test_shared_range():
assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax))
else:
assert np.all(bit_bigger <= casted_mx)
if it in np.sctypes['uint']:
if it in sctypes['uint']:
assert mn == 0
continue
# And something larger for the minimum
Expand Down Expand Up @@ -90,8 +91,8 @@ def test_shared_range_inputs():


def test_casting():
for ft in np.sctypes['float']:
for it in np.sctypes['int'] + np.sctypes['uint']:
for ft in sctypes['float']:
for it in sctypes['int'] + sctypes['uint']:
ii = np.iinfo(it)
arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6]
farr_orig = np.array(arr, dtype=ft)
Expand Down Expand Up @@ -140,7 +141,7 @@ def test_casting():


def test_int_abs():
for itype in np.sctypes['int']:
for itype in sctypes['int']:
info = np.iinfo(itype)
in_arr = np.array([info.min, info.max], dtype=itype)
idtype = np.dtype(itype)
Expand Down Expand Up @@ -188,7 +189,7 @@ def test_able_int_type():

def test_able_casting():
# Check the able_int_type function guesses numpy out type
types = np.sctypes['int'] + np.sctypes['uint']
types = sctypes['int'] + sctypes['uint']
for in_type in types:
in_info = np.iinfo(in_type)
in_mn, in_mx = in_info.min, in_info.max
Expand Down
Loading
Loading