Support passing data in numpy int8, int16, uint8 and uint16 dtypes to GMT #1963

Merged: 4 commits (Jun 16, 2022)
Changes from 3 commits
22 changes: 13 additions & 9 deletions pygmt/clib/session.py
@@ -53,14 +53,18 @@
REGISTRATIONS = ["GMT_GRID_PIXEL_REG", "GMT_GRID_NODE_REG"]

DTYPES = {
-    np.float64: "GMT_DOUBLE",
-    np.float32: "GMT_FLOAT",
-    np.int64: "GMT_LONG",
+    np.int8: "GMT_CHAR",
+    np.int16: "GMT_SHORT",
    np.int32: "GMT_INT",
-    np.uint64: "GMT_ULONG",
+    np.int64: "GMT_LONG",
+    np.uint8: "GMT_UCHAR",
+    np.uint16: "GMT_USHORT",
    np.uint32: "GMT_UINT",
-    np.datetime64: "GMT_DATETIME",
+    np.uint64: "GMT_ULONG",
+    np.float32: "GMT_FLOAT",
+    np.float64: "GMT_DOUBLE",
    np.str_: "GMT_TEXT",
+    np.datetime64: "GMT_DATETIME",
}
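In practical terms, the new entries mean that arrays in the smaller integer dtypes now resolve to a GMT type instead of being rejected. A minimal sketch of the kind of call this enables is below; the dtype choices and the `gmt info` round trip are illustrative, and the `virtualfile_from_vectors`/`call_module` pattern follows the existing tests rather than anything added in this PR:

```python
import numpy as np

from pygmt import clib
from pygmt.helpers import GMTTempFile

# int8 and uint16 columns now map to GMT_CHAR and GMT_USHORT via DTYPES,
# so they can be passed to GMT without first casting to int32/int64.
x = np.arange(5, dtype=np.int8)
y = np.arange(5, dtype=np.uint16)

with clib.Session() as lib:
    with lib.virtualfile_from_vectors(x, y) as vfile:
        with GMTTempFile() as tmpfile:
            # Run "gmt info" on the in-memory dataset and read back the report.
            lib.call_module("info", f"{vfile} ->{tmpfile.name}")
            print(tmpfile.read().strip())
```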


@@ -732,8 +736,8 @@ def put_vector(self, dataset, column, vector):
        The dataset must be created by :meth:`pygmt.clib.Session.create_data`
        first. Use ``family='GMT_IS_DATASET|GMT_VIA_VECTOR'``.

-        Not at all numpy dtypes are supported, only: float64, float32, int64,
-        int32, uint64, uint32, datetime64 and str\_.
+        Not at all numpy dtypes are supported, only: int8, int16, int32, int64,
+        uint8, uint16, uint32, uint64, float32, float64, str\_ and datetime64.

seisman marked this conversation as resolved.

        .. warning::
            The numpy array must be C contiguous in memory. If it comes from a
@@ -856,8 +860,8 @@ def put_matrix(self, dataset, matrix, pad=0):
        The dataset must be created by :meth:`pygmt.clib.Session.create_data`
        first. Use ``|GMT_VIA_MATRIX'`` in the family.

-        Not at all numpy dtypes are supported, only: float64, float32, int64,
-        int32, uint64, and uint32.
+        Not at all numpy dtypes are supported, only: int8, int16, int32, int64,
+        uint8, uint16, uint32, uint64, float32, float64, str\_ and datetime64.
Member (Author) commented:

    I just realized that passing a str_ type matrix to GMT is allowed in the PyGMT put_matrix function, but it was not documented here. I'm wondering what happens when a str matrix is passed to GMT.

Member replied with a suggested change:

    -Not at all numpy dtypes are supported, only: int8, int16, int32, int64,
    -uint8, uint16, uint32, uint64, float32, float64, str\_ and datetime64.
    +Not all numpy dtypes are supported, only: int8, int16, int32, int64,
    +uint8, uint16, uint32, uint64, float32, float64, str\_ and datetime64.

    Maybe try and see?

seisman marked this conversation as resolved.

        .. warning::
            The numpy array must be C contiguous in memory. Use
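Regarding the open question above about str\_ matrices, one way to run the reviewer's "try and see" experiment might look like the sketch below. It is untested here: the create_data arguments follow the usual GMT_VIA_MATRIX pattern and are assumptions, and whether GMT accepts or rejects the text matrix is exactly what the experiment would answer.

```python
import numpy as np

from pygmt import clib

# Hypothetical experiment: put_matrix looks up np.str_ in DTYPES ("GMT_TEXT"),
# so the call is accepted on the PyGMT side; what GMT itself does with a text
# matrix is the open question.
data = np.array([["1.0", "2.0", "3.0"], ["4.0", "5.0", "6.0"]], dtype=np.str_)

with clib.Session() as lib:
    dataset = lib.create_data(
        family="GMT_IS_DATASET|GMT_VIA_MATRIX",
        geometry="GMT_IS_POINT",
        mode="GMT_CONTAINER_ONLY",
        dim=[data.shape[1], data.shape[0], 1, 0],  # columns, rows, layers, dtype
    )
    lib.put_matrix(dataset, matrix=data)  # may raise GMTCLibError, depending on GMT
```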
23 changes: 13 additions & 10 deletions pygmt/tests/test_clib.py
@@ -36,6 +36,14 @@ def data():
return np.loadtxt(POINTS_DATA)


+@pytest.fixture(scope="module", name="dtypes")
+def fixture_dtypes():
+    """
+    List of supported numpy dtypes.
+    """
+    return "int8 int16 int32 int64 uint8 uint16 uint32 uint64 float32 float64".split()
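As a side note on the testing approach (not part of this PR): the module-scoped fixture keeps the existing for-loops over dtypes intact, while an alternative would be to parametrize the tests so each dtype shows up as its own test case. A rough sketch, with a placeholder test name and body:

```python
import numpy as np
import pytest

DTYPES = "int8 int16 int32 int64 uint8 uint16 uint32 uint64 float32 float64".split()


@pytest.mark.parametrize("dtype", DTYPES)
def test_dtype_roundtrip(dtype):
    """
    Each dtype becomes an individually reported test case instead of one loop pass.
    """
    data = np.arange(6, dtype=dtype).reshape((2, 3))
    assert data.dtype == np.dtype(dtype)
```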


@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
@@ -339,11 +347,10 @@ def test_create_data_fails():
)


-def test_virtual_file():
+def test_virtual_file(dtypes):
"""
Test passing in data via a virtual file with a Dataset.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
@@ -497,11 +504,10 @@ def test_virtualfile_from_data_fail_non_valid_data(data):
)


-def test_virtualfile_from_vectors():
+def test_virtualfile_from_vectors(dtypes):
"""
Test the automation for transforming vectors to virtual file dataset.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
@@ -588,11 +594,10 @@ def test_virtualfile_from_vectors_diff_size():
print("This should have failed")


-def test_virtualfile_from_matrix():
+def test_virtualfile_from_matrix(dtypes):
"""
Test transforming a matrix to virtual file dataset.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
@@ -606,11 +611,10 @@ def test_virtualfile_from_matrix():
assert output == expected


-def test_virtualfile_from_matrix_slice():
+def test_virtualfile_from_matrix_slice(dtypes):
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
@@ -627,11 +631,10 @@ def test_virtualfile_from_matrix_slice():
assert output == expected


-def test_virtualfile_from_vectors_pandas():
+def test_virtualfile_from_vectors_pandas(dtypes):
"""
Pass vectors to a dataset using pandas Series.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
14 changes: 10 additions & 4 deletions pygmt/tests/test_clib_put_matrix.py
@@ -11,11 +11,18 @@
from pygmt.tests.test_clib import mock


-def test_put_matrix():
+@pytest.fixture(scope="module", name="dtypes")
+def fixture_dtypes():
+    """
+    List of supported numpy dtypes.
+    """
+    return "int8 int16 int32 int64 uint8 uint16 uint32 uint64 float32 float64".split()
+
+
+def test_put_matrix(dtypes):
"""
Check that assigning a numpy 2d array to a dataset works.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (3, 4)
for dtype in dtypes:
with clib.Session() as lib:
@@ -57,11 +64,10 @@ def test_put_matrix_fails():
lib.put_matrix(dataset=None, matrix=np.empty((10, 2)), pad=0)


-def test_put_matrix_grid():
+def test_put_matrix_grid(dtypes):
"""
Check that assigning a numpy 2d array to an ASCII and NetCDF grid works.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
wesn = [10, 15, 30, 40, 0, 0]
inc = [1, 1]
shape = ((wesn[3] - wesn[2]) // inc[1] + 1, (wesn[1] - wesn[0]) // inc[0] + 1)
14 changes: 10 additions & 4 deletions pygmt/tests/test_clib_put_vector.py
@@ -12,11 +12,18 @@
from pygmt.helpers import GMTTempFile


-def test_put_vector():
+@pytest.fixture(scope="module", name="dtypes")
+def fixture_dtypes():
+    """
+    List of supported numpy dtypes.
+    """
+    return "int8 int16 int32 int64 uint8 uint16 uint32 uint64 float32 float64".split()
+
+
+def test_put_vector(dtypes):
"""
Check that assigning a numpy array to a dataset works.
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
for dtype in dtypes:
with clib.Session() as lib:
dataset = lib.create_data(
@@ -50,13 +57,12 @@ def test_put_vector():
npt.assert_allclose(newz, z)


-def test_put_vector_mixed_dtypes():
+def test_put_vector_mixed_dtypes(dtypes):
"""
Passing a numpy array of mixed dtypes to a dataset.

See https://github.com/GenericMappingTools/pygmt/issues/255
"""
-dtypes = "float32 float64 int32 int64 uint32 uint64".split()
for dtypex, dtypey in itertools.permutations(dtypes, r=2):
with clib.Session() as lib:
dataset = lib.create_data(