From 39962de0241590f0a9597b1f00df74ee593ac0a4 Mon Sep 17 00:00:00 2001
From: Steve Dower
Date: Mon, 12 Feb 2024 17:47:36 +0000
Subject: [PATCH] Switch to Py_ssize_t consistently instead of int and size_t

---
 Doc/c-api/long.rst              |  4 ++--
 Include/cpython/longobject.h    |  4 ++--
 Lib/test/test_capi/test_long.py |  8 +-------
 Modules/_testcapi/long.c        | 16 +++-------------
 Objects/longobject.c            | 31 +++++++++++++------------------
 5 files changed, 21 insertions(+), 42 deletions(-)

diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst
index 06bb2435565804..c39823e5e6787f 100644
--- a/Doc/c-api/long.rst
+++ b/Doc/c-api/long.rst
@@ -354,12 +354,12 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate.
 
    Returns ``NULL`` on error. Use :c:func:`PyErr_Occurred` to disambiguate.
 
-.. c:function:: int PyLong_AsNativeBytes(PyObject *pylong, void* buffer, size_t n_bytes, int endianness)
+.. c:function:: Py_ssize_t PyLong_AsNativeBytes(PyObject *pylong, void* buffer, Py_ssize_t n_bytes, int endianness)
 
    Copy the Python integer value to a native *buffer* of size *n_bytes*::
 
       int value;
-      int bytes = PyLong_CopyBits(v, &value, sizeof(value), -1);
+      Py_ssize_t bytes = PyLong_AsNativeBytes(v, &value, sizeof(value), -1);
       if (bytes < 0) {
          // Error occurred
          return NULL;
diff --git a/Include/cpython/longobject.h b/Include/cpython/longobject.h
index 1675ee6d0dbaf6..07251db6bcc203 100644
--- a/Include/cpython/longobject.h
+++ b/Include/cpython/longobject.h
@@ -23,8 +23,8 @@ PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base);
    may be larger than necessary - this function is not an accurate way to
    calculate the bit length of an integer object.
  */
-PyAPI_FUNC(int) PyLong_AsNativeBytes(PyObject* v, void* buffer, size_t n_bytes,
-    int endianness);
+PyAPI_FUNC(Py_ssize_t) PyLong_AsNativeBytes(PyObject* v, void* buffer,
+    Py_ssize_t n_bytes, int endianness);
 
 /* PyLong_FromNativeBytes: Create an int value from a native integer
    n_bytes is the number of bytes to read from the buffer. Passing 0 will
diff --git a/Lib/test/test_capi/test_long.py b/Lib/test/test_capi/test_long.py
index 3bcb95edbc1220..fc82cbfa66ea7a 100644
--- a/Lib/test/test_capi/test_long.py
+++ b/Lib/test/test_capi/test_long.py
@@ -428,8 +428,7 @@ def test_long_asnativebytes(self):
         import math
         from _testcapi import (
             pylong_asnativebytes as asnativebytes,
-            pylong_asnativebytes_too_big_n,
-            SIZE_MAX
+            SIZE_MAX,
         )
 
         # Abbreviate sizeof(Py_ssize_t) to SZ because we use it a lot
@@ -529,11 +528,6 @@ def test_long_asnativebytes(self):
         with self.assertRaises(TypeError):
             asnativebytes('not a number', buffer, 0, -1)
 
-        # We pass any number we like, but the function will pass an n_bytes
-        # that is too big to make sure we fail
-        with self.assertRaises(SystemError):
-            pylong_asnativebytes_too_big_n(100)
-
     def test_long_fromnativebytes(self):
         import math
         from _testcapi import (
diff --git a/Modules/_testcapi/long.c b/Modules/_testcapi/long.c
index 15f5085935541d..dc21cf9f475228 100644
--- a/Modules/_testcapi/long.c
+++ b/Modules/_testcapi/long.c
@@ -790,23 +790,14 @@ pylong_asnativebytes(PyObject *module, PyObject *args)
         PyBuffer_Release(&buffer);
         return NULL;
     }
-    // Allow n > INT_MAX for tests - it will error out without writing to the
-    // buffer, so no overrun issues.
-    if (buffer.len < n && n <= INT_MAX) {
+    if (buffer.len < n) {
         PyErr_SetString(PyExc_ValueError, "buffer must be at least 'n' bytes");
         PyBuffer_Release(&buffer);
         return NULL;
     }
-    int res = PyLong_AsNativeBytes(v, buffer.buf, n, (int)endianness);
+    Py_ssize_t res = PyLong_AsNativeBytes(v, buffer.buf, n, (int)endianness);
     PyBuffer_Release(&buffer);
-    return res >= 0 ? PyLong_FromLong(res) : NULL;
-}
-
-static PyObject *
-pylong_asnativebytes_too_big_n(PyObject *module, PyObject *v)
-{
-    int res = PyLong_AsNativeBytes(v, NULL, (size_t)INT_MAX + 1, -1);
-    return res >= 0 ? PyLong_FromLong(res) : NULL;
+    return res >= 0 ? PyLong_FromSsize_t(res) : NULL;
 }
 
 static PyObject *
@@ -859,7 +850,6 @@ static PyMethodDef test_methods[] = {
     {"pylong_asdouble", pylong_asdouble, METH_O},
     {"pylong_asvoidptr", pylong_asvoidptr, METH_O},
     {"pylong_asnativebytes", pylong_asnativebytes, METH_VARARGS},
-    {"pylong_asnativebytes_too_big_n", pylong_asnativebytes_too_big_n, METH_O},
     {"pylong_fromnativebytes", pylong_fromnativebytes, METH_VARARGS},
     {NULL},
 };
diff --git a/Objects/longobject.c b/Objects/longobject.c
index f006a1fe7955c0..932111f58425f2 100644
--- a/Objects/longobject.c
+++ b/Objects/longobject.c
@@ -1093,8 +1093,8 @@ _resolve_endianness(int *endianness)
     return 0;
 }
 
-int
-PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
+Py_ssize_t
+PyLong_AsNativeBytes(PyObject* vv, void* buffer, Py_ssize_t n, int endianness)
 {
     PyLongObject *v;
     union {
@@ -1102,18 +1102,13 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
         unsigned char b[sizeof(Py_ssize_t)];
     } cv;
     int do_decref = 0;
-    int res = 0;
+    Py_ssize_t res = 0;
 
-    if (vv == NULL) {
+    if (vv == NULL || n < 0) {
         PyErr_BadInternalCall();
         return -1;
     }
 
-    if ((size_t)(int)n != n || (int)n < 0) {
-        PyErr_SetString(PyExc_SystemError, "n_bytes too big to convert");
-        return -1;
-    }
-
     int little_endian = endianness;
     if (_resolve_endianness(&little_endian) < 0) {
         return -1;
@@ -1146,13 +1141,13 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
             memcpy(buffer, cv.b, n);
         }
         else {
-            for (size_t i = 0; i < n; ++i) {
+            for (Py_ssize_t i = 0; i < n; ++i) {
                 ((unsigned char*)buffer)[n - i - 1] = cv.b[i];
            }
         }
 #else
         if (little_endian) {
-            for (size_t i = 0; i < n; ++i) {
+            for (Py_ssize_t i = 0; i < n; ++i) {
                 ((unsigned char*)buffer)[i] = cv.b[sizeof(cv.b) - i - 1];
             }
         }
@@ -1163,7 +1158,7 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
 
         /* If we fit, return the requested number of bytes */
         if (_fits_in_n_bits(cv.v, n * 8)) {
-            res = (int)n;
+            res = n;
         }
     }
     else {
@@ -1175,20 +1170,20 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
         }
         else {
             unsigned char *b = (unsigned char *)buffer;
-            for (size_t i = 0; i < n - sizeof(cv.b); ++i) {
+            for (Py_ssize_t i = 0; i < n - (int)sizeof(cv.b); ++i) {
                 *b++ = fill;
             }
-            for (size_t i = sizeof(cv.b); i > 0; --i) {
+            for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) {
                 *b++ = cv.b[i - 1];
             }
         }
 #else
         if (little_endian) {
             unsigned char *b = (unsigned char *)buffer;
-            for (size_t i = sizeof(cv.b); i > 0; --i) {
+            for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) {
                 *b++ = cv.b[i - 1];
             }
-            for (size_t i = 0; i < n - sizeof(cv.b); ++i) {
+            for (Py_ssize_t i = 0; i < n - sizeof(cv.b); ++i) {
                 *b++ = fill;
             }
         }
@@ -1201,7 +1196,7 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
     }
     else {
         if (n > 0) {
-            _PyLong_AsByteArray(v, buffer, n, little_endian, 1, 0);
+            _PyLong_AsByteArray(v, buffer, (size_t)n, little_endian, 1, 0);
         }
 
         // More efficient calculation for number of bytes required?
@@ -1210,7 +1205,7 @@ PyLong_AsNativeBytes(PyObject* vv, void* buffer, size_t n, int endianness)
          * multiples of 8 to the next byte, but we add an implied bit for
          * the sign and it cancels out. */
         size_t n_needed = (nb / 8) + 1;
-        res = (int)n_needed;
+        res = (Py_ssize_t)n_needed;
         if ((size_t)res != n_needed) {
             PyErr_SetString(PyExc_OverflowError,
                             "value too large to convert");
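
Note (not part of the patch): a minimal sketch of what a caller looks like under the new signature. The helper name read_int64 is hypothetical, and the truncation check assumes the return value reports the number of bytes required to hold the value, as the longobject.h comment above describes.

    #include <Python.h>
    #include <stdint.h>

    /* Illustrative only: copy a Python int into an int64_t, treating a
       negative return as an error (exception already set) and a result
       larger than the destination as "value did not fit". */
    static int
    read_int64(PyObject *pylong, int64_t *out)
    {
        int64_t value;
        /* -1 selects native endianness, as in the documentation example. */
        Py_ssize_t bytes = PyLong_AsNativeBytes(pylong, &value, sizeof(value), -1);
        if (bytes < 0) {
            return -1;
        }
        if (bytes > (Py_ssize_t)sizeof(value)) {
            PyErr_SetString(PyExc_OverflowError, "value too large for int64_t");
            return -1;
        }
        *out = value;
        return 0;
    }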