From 60327d1ee61a641ddfcc0574e0e1ebd6d265f62f Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:20:22 +0400
Subject: [PATCH 1/5] Make use of `[wrapping_]byte_{add,sub}`

...replacing `.cast().wrapping_offset().cast()` & similar code.
---
 compiler/rustc_arena/src/lib.rs            |  3 ++-
 library/alloc/src/vec/into_iter.rs         | 13 +++++--------
 library/alloc/src/vec/mod.rs               |  4 ++--
 library/core/src/ptr/const_ptr.rs          |  2 +-
 library/core/src/ptr/mut_ptr.rs            |  2 +-
 library/core/src/slice/iter/macros.rs      |  2 +-
 library/core/tests/atomic.rs               |  2 +-
 library/core/tests/ptr.rs                  |  2 +-
 library/std/src/io/error/repr_bitpacked.rs |  4 ++--
 library/std/src/lib.rs                     |  1 +
 10 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 6529f11100d2d..83ccbb9def68b 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -16,6 +16,7 @@
 #![feature(maybe_uninit_slice)]
 #![feature(min_specialization)]
 #![feature(decl_macro)]
+#![feature(pointer_byte_offsets)]
 #![feature(rustc_attrs)]
 #![cfg_attr(test, feature(test))]
 #![feature(strict_provenance)]
@@ -209,7 +210,7 @@ impl<T> TypedArena<T> {
         unsafe {
             if mem::size_of::<T>() == 0 {
-                self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
+                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                 let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                 // Don't drop the object. This `write` is equivalent to `forget`.
                 ptr::write(ptr, object);

diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 1b483e3fc7793..7703de8ea5d19 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -4,7 +4,6 @@ use crate::alloc::{Allocator, Global};
 use crate::raw_vec::RawVec;
 use core::array;
 use core::fmt;
-use core::intrinsics::arith_offset;
 use core::iter::{
     FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
 };
@@ -154,7 +153,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+            self.ptr = self.ptr.wrapping_byte_add(1);

             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -184,7 +183,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
             // effectively results in unsigned pointers representing positions 0..usize::MAX,
             // which is valid for ZSTs.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+            self.ptr = self.ptr.wrapping_byte_add(step_size);
         } else {
             // SAFETY: the min() above ensures that step_size is in bounds
             self.ptr = unsafe { self.ptr.add(step_size) };
@@ -217,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
         }

-        self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+        self.ptr = self.ptr.wrapping_byte_add(N);
         // Safety: ditto
         return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
     }
@@ -267,7 +266,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             None
         } else if mem::size_of::<T>() == 0 {
             // See above for why 'ptr.offset' isn't used
-            self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+            self.end = self.end.wrapping_byte_sub(1);

             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -283,9 +282,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
         let step_size = self.len().min(n);
         if mem::size_of::<T>() == 0 {
             // SAFETY: same as for advance_by()
-            self.end = unsafe {
-                arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
-            }
+            self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: same as for advance_by()
             self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
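A quick illustration of the equivalence patch 1 relies on. This is an editorial sketch, not part of the patch; it assumes a toolchain where the byte-offset methods are available (they were nightly-only, behind `pointer_byte_offsets`, when this series was written, and have since been stabilized):

    fn main() {
        let x = 5u32;
        let p: *const u32 = &x;

        // The old spelling: cast to a byte pointer, step, cast back.
        let old = p.cast::<u8>().wrapping_offset(1).cast::<u32>();
        // The new spelling used throughout this patch.
        let new = p.wrapping_byte_add(1);

        assert_eq!(old, new); // same address, same provenance
    }

The win is purely readability: one method call states the intent (advance by N bytes, no in-bounds requirement) that the cast dance obscured.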
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index fa9f2131c0c1d..1d8580fbece0c 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -59,7 +59,7 @@ use core::cmp::Ordering;
 use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
 use core::iter;
 #[cfg(not(no_global_oom_handling))]
 use core::iter::FromIterator;
@@ -2677,7 +2677,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
             let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
             let begin = me.as_mut_ptr();
             let end = if mem::size_of::<T>() == 0 {
-                arith_offset(begin as *const i8, me.len() as isize) as *const T
+                begin.wrapping_byte_add(me.len())
             } else {
                 begin.add(me.len()) as *const T
             };

diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index c25b159c533a1..feba3283e468c 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -249,7 +249,7 @@ impl<T: ?Sized> *const T {
         let offset = dest_addr.wrapping_sub(self_addr);

         // This is the canonical desugaring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }

     /// Creates a new pointer by mapping `self`'s address to a new one.

diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index fff06b458c7c1..a70f8747916c3 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -255,7 +255,7 @@ impl<T: ?Sized> *mut T {
         let offset = dest_addr.wrapping_sub(self_addr);

         // This is the canonical desugaring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }

     /// Creates a new pointer by mapping `self`'s address to a new one.
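The `with_addr`-style rewrite above is the same idea with a signed count. A standalone sketch of what `wrapping_byte_offset` does (again an illustration, not code from the patch):

    fn main() {
        let arr = [1u16, 2, 3];
        let p: *const u16 = &arr[2];

        // Step back two *bytes* (= one u16 element) with a signed byte count.
        let old = p.cast::<u8>().wrapping_offset(-2).cast::<u16>();
        let new = p.wrapping_byte_offset(-2);

        assert_eq!(old, new);
        assert_eq!(new, &arr[1] as *const u16);
    }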
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index c05242222dde7..47455760a4bd7 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -64,7 +64,7 @@ macro_rules! iterator {
     // backwards by `n`. `n` must not exceed `self.len()`.
     macro_rules! zst_shrink {
         ($self: ident, $n: ident) => {
-            $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+            $self.end = $self.end.wrapping_byte_offset(-$n);
         }
     }

diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 13b12db209a76..94b0310603bf4 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -155,7 +155,7 @@ fn ptr_add_data() {
     assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);

-    let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+    let bytes_from_n = |b| n.wrapping_byte_add(b);

     assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));

diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 12861794c2d2c..97a369810056d 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -650,7 +650,7 @@ fn thin_box() {
                 .unwrap_or_else(|| handle_alloc_error(layout))
                 .cast::<DynMetadata<T>>();
             ptr.as_ptr().write(meta);
-            ptr.cast::<u8>().as_ptr().add(offset).cast::<Value>().write(value);
+            ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
             Self { ptr, phantom: PhantomData }
         }
     }

diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 292bf4826fd23..781ae03ad45dc 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -269,10 +269,10 @@ where
         }
         TAG_SIMPLE_MESSAGE => ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()),
         TAG_CUSTOM => {
-            // It would be correct for us to use `ptr::sub` here (see the
+            // It would be correct for us to use `ptr::byte_sub` here (see the
             // comment above the `wrapping_add` call in `new_custom` for why),
             // but it isn't clear that it makes a difference, so we don't.
-            let custom = ptr.as_ptr().cast::<u8>().wrapping_sub(TAG_CUSTOM).cast::<Custom>();
+            let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
             ErrorData::Custom(make_custom(custom))
         }
         _ => {

diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 475a1d9fd9920..55f17ae522490 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -284,6 +284,7 @@
 #![feature(panic_can_unwind)]
 #![feature(panic_info_message)]
 #![feature(panic_internals)]
+#![feature(pointer_byte_offsets)]
 #![feature(portable_simd)]
 #![feature(prelude_2024)]
 #![feature(provide_any)]

From bece0af744968a7833d76b612a6debc46bc8ff80 Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:23:00 +0400
Subject: [PATCH 2/5] Remove useless pointer cast
---
 library/alloc/src/vec/into_iter.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 7703de8ea5d19..b63375863c211 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -147,7 +147,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {

     #[inline]
     fn next(&mut self) -> Option<T> {
-        if self.ptr as *const _ == self.end {
+        if self.ptr == self.end {
             None
         } else if mem::size_of::<T>() == 0 {
             // purposefully don't use 'ptr.offset' because for
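Patch 2 works because `PartialEq` is implemented directly for raw pointers of the same type; the `as *const _` cast is only ever needed when the two sides differ in mutability or pointee type. A minimal check (illustrative only):

    fn main() {
        let v = [1u8, 2, 3];
        let start: *const u8 = v.as_ptr();
        let end: *const u8 = v.as_ptr().wrapping_add(v.len());
        assert!(start != end);
        assert!(start == v.as_ptr());
    }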
From e13fa1c90d2009c4d3adbf35acd2ef08462381b7 Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:26:37 +0400
Subject: [PATCH 3/5] Make use of `pointer::is_aligned[_to]`
---
 library/alloc/tests/lib.rs                     | 1 +
 library/alloc/tests/thin_box.rs                | 8 ++++----
 library/core/src/intrinsics.rs                 | 2 +-
 library/std/src/sys/sgx/abi/usercalls/alloc.rs | 4 ++--
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d83cd29ddbad9..99bfb2a45ed90 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -38,6 +38,7 @@
 #![feature(const_str_from_utf8)]
 #![feature(nonnull_slice_from_raw_parts)]
 #![feature(panic_update_hook)]
+#![feature(pointer_is_aligned)]
 #![feature(slice_flatten)]
 #![feature(thin_box)]
 #![feature(bench_black_box)]

diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index 368aa564f9436..e008b0cc35718 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -48,11 +48,11 @@ fn verify_aligned<T>(ptr: *const T) {
     // practice these checks are mostly just smoke-detectors for an extremely
     // broken `ThinBox` impl, since it's an extremely subtle piece of code.
     let ptr = core::hint::black_box(ptr);
-    let align = core::mem::align_of::<T>();
     assert!(
-        (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
-        "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
-        core::any::type_name::<T>(),
+        ptr.is_aligned() && !ptr.is_null(),
+        "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
+        ty = core::any::type_name::<T>(),
+        align = core::mem::align_of::<T>(),
     );
 }

diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 15467e0191dbf..5cae6da9a567b 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -2139,7 +2139,7 @@ pub(crate) use assert_unsafe_precondition;

 /// Checks whether `ptr` is properly aligned with respect to
 /// `align_of::<T>()`.
 pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
-    !ptr.is_null() && ptr.addr() % mem::align_of::<T>() == 0
+    !ptr.is_null() && ptr.is_aligned()
 }

 /// Checks whether the regions of memory starting at `src` and `dst` of size
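What `is_aligned` replaces, in sketch form (an editorial example, not from the patch; `is_aligned` has since been stabilized, `is_aligned_to` somewhat later):

    fn main() {
        let x = 0u64;
        let p: *const u64 = &x;

        // The open-coded check this patch deletes...
        let manual = (p as usize) % core::mem::align_of::<u64>() == 0;
        // ...and its named replacement.
        assert_eq!(manual, p.is_aligned());

        // A well-aligned u64 pointer is, viewed as a byte pointer, 8-aligned.
        assert!(p.cast::<u8>().is_aligned_to(8));
    }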
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 66fa1efbf103f..a2c8ab7f38d54 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -115,7 +115,7 @@ pub unsafe trait UserSafe {
     /// * the pointer is null.
     /// * the pointed-to range is not in user memory.
     unsafe fn check_ptr(ptr: *const Self) {
-        let is_aligned = |p: *const u8| -> bool { 0 == p.addr() & (Self::align_of() - 1) };
+        let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) };

         assert!(is_aligned(ptr as *const u8));
         assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
@@ -367,7 +367,7 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         unsafe {
             copy_bytewise_to_userspace(src, dst, len);
         }
-    } else if len % 8 == 0 && dst as usize % 8 == 0 {
+    } else if len % 8 == 0 && dst.is_aligned_to(8) {
         // Copying 8-byte aligned quadwords: copy quad word per quad word
         unsafe {
             copy_aligned_quadwords_to_userspace(src, dst, len);

From e3ee9af6b085269f2ef35ec8ea4e298e0b804dc0 Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:33:06 +0400
Subject: [PATCH 4/5] Aggressively replace `pointer::offset` with `add` and `sub`
---
 compiler/rustc_arena/src/lib.rs               |   2 +-
 .../example/alloc_system.rs                   |   2 +-
 compiler/rustc_codegen_gcc/example/alloc_system.rs |   2 +-
 compiler/rustc_serialize/src/serialize.rs     |   2 +-
 library/alloc/src/alloc/tests.rs              |   2 +-
 .../alloc/src/collections/vec_deque/mod.rs    |   4 +-
 library/alloc/src/slice.rs                    |   6 +-
 library/alloc/src/vec/in_place_collect.rs     |   2 +-
 library/alloc/src/vec/into_iter.rs            |   6 +-
 library/alloc/src/vec/mod.rs                  |  14 +-
 library/alloc/src/vec/spec_extend.rs          |   2 +-
 library/alloc/tests/str.rs                    |  10 +-
 library/core/src/fmt/num.rs                   | 124 +++++++++---------
 library/core/src/intrinsics.rs                |   2 +-
 library/core/src/slice/memchr.rs              |   4 +-
 library/core/src/slice/mod.rs                 |   2 +-
 library/core/src/slice/sort.rs                |  36 ++---
 library/core/src/str/validations.rs           |   4 +-
 library/panic_abort/src/android.rs            |   2 +-
 library/panic_unwind/src/dwarf/eh.rs          |   2 +-
 library/std/src/os/unix/net/addr.rs           |   2 +-
 .../std/src/sys/sgx/abi/usercalls/alloc.rs    |  28 ++--
 .../std/src/sys/sgx/abi/usercalls/tests.rs    |   4 +-
 library/std/src/sys/windows/alloc.rs          |   4 +-
 library/std/src/sys/windows/fs.rs             |   6 +-
 library/std/src/sys/windows/os.rs             |   6 +-
 26 files changed, 140 insertions(+), 140 deletions(-)

diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 83ccbb9def68b..78f6e65099a1f 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -218,7 +218,7 @@ impl<T> TypedArena<T> {
         } else {
             let ptr = self.ptr.get();
             // Advance the pointer.
-            self.ptr.set(self.ptr.get().offset(1));
+            self.ptr.set(self.ptr.get().add(1));
             // Write into uninitialized memory.
             ptr::write(ptr, object);
             &mut *ptr
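Patch 4 below is mechanical, but the contract is worth spelling out: for an in-bounds, non-wrapping offset, `add`/`sub` with a `usize` count are exactly `offset` with the equivalent `isize`, minus the cast noise. An illustrative sketch:

    fn main() {
        let v = [10u8, 20, 30];
        let p = v.as_ptr();
        unsafe {
            // `add(n)` is `offset(n as isize)`...
            assert_eq!(*p.offset(2), *p.add(2));
            // ...and `sub(n)` is `offset(-(n as isize))`.
            assert_eq!(*p.add(2).offset(-1), *p.add(2).sub(1));
        }
    }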
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
index cf95c89bc3156..50261c1939739 100644
--- a/compiler/rustc_codegen_cranelift/example/alloc_system.rs
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -94,7 +94,7 @@ mod platform {
     struct Header(*mut u8);
     const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
     unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
-        &mut *(ptr as *mut Header).offset(-1)
+        &mut *(ptr as *mut Header).sub(1)
     }
     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
         let aligned = ptr.add(align - (ptr as usize & (align - 1)));

diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
index 5f66ca67f2d40..89661918d05a5 100644
--- a/compiler/rustc_codegen_gcc/example/alloc_system.rs
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -156,7 +156,7 @@ mod platform {
     struct Header(*mut u8);
    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
     unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
-        &mut *(ptr as *mut Header).offset(-1)
+        &mut *(ptr as *mut Header).sub(1)
     }
     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
         let aligned = ptr.add(align - (ptr as usize & (align - 1)));

diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs
index 36585b8d77e0a..9bd5550038fc6 100644
--- a/compiler/rustc_serialize/src/serialize.rs
+++ b/compiler/rustc_serialize/src/serialize.rs
@@ -273,7 +273,7 @@ impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
         unsafe {
             let ptr: *mut T = vec.as_mut_ptr();
             for i in 0..len {
-                std::ptr::write(ptr.offset(i as isize), Decodable::decode(d));
+                std::ptr::write(ptr.add(i), Decodable::decode(d));
             }
             vec.set_len(len);
         }

diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
index 7d560964d85be..b2f0194599b28 100644
--- a/library/alloc/src/alloc/tests.rs
+++ b/library/alloc/src/alloc/tests.rs
@@ -15,7 +15,7 @@ fn allocate_zeroed() {
         let end = i.add(layout.size());
         while i < end {
             assert_eq!(*i, 0);
-            i = i.offset(1);
+            i = i.add(1);
         }
         Global.deallocate(ptr.as_non_null_ptr(), layout);
     }

diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4d895d83745b2..57ab74e01590b 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             let mut right_offset = 0;
             for i in left_edge..right_edge {
                 right_offset = (i - left_edge) % (cap - right_edge);
-                let src: isize = (right_edge + right_offset) as isize;
-                ptr::swap(buf.add(i), buf.offset(src));
+                let src = right_edge + right_offset;
+                ptr::swap(buf.add(i), buf.add(src));
             }
             let n_ops = right_edge - left_edge;
             left_edge += n_ops;
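The `serialize.rs` and `vec_deque` hunks above share one idiom: keep the loop counter a `usize` and hand it straight to `add`, instead of casting to `isize` for `offset`. A self-contained version of that write-loop pattern (a sketch, not rustc's actual decoder):

    fn fill_squares(len: usize) -> Vec<usize> {
        let mut vec = Vec::with_capacity(len);
        unsafe {
            let ptr: *mut usize = vec.as_mut_ptr();
            for i in 0..len {
                // usize index, no `as isize` cast needed.
                std::ptr::write(ptr.add(i), i * i);
            }
            vec.set_len(len);
        }
        vec
    }

    fn main() {
        assert_eq!(fill_squares(4), [0, 1, 4, 9]);
    }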
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 63d4d94529008..5733124ec7565 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -1024,7 +1024,7 @@ where
         // Consume the greater side.
         // If equal, prefer the right run to maintain stability.
         unsafe {
-            let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+            let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
                 decrement_and_get(left)
             } else {
                 decrement_and_get(right)
@@ -1038,12 +1038,12 @@ where

 unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
     let old = *ptr;
-    *ptr = unsafe { ptr.offset(1) };
+    *ptr = unsafe { ptr.add(1) };
     old
 }

 unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
-    *ptr = unsafe { ptr.offset(-1) };
+    *ptr = unsafe { ptr.sub(1) };
     *ptr
 }

diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index 55dcb84ad16f9..b211421b20270 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -267,7 +267,7 @@ where
             // one slot in the underlying storage will have been freed up and we can immediately
             // write back the result.
             unsafe {
-                let dst = dst_buf.offset(i as isize);
+                let dst = dst_buf.add(i);
                 debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
                 ptr::write(dst, self.__iterator_get_unchecked(i));
                 // Since this executes user code which can panic we have to bump the pointer

diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index b63375863c211..ed049194dd068 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -159,7 +159,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             Some(unsafe { mem::zeroed() })
         } else {
             let old = self.ptr;
-            self.ptr = unsafe { self.ptr.offset(1) };
+            self.ptr = unsafe { self.ptr.add(1) };

             Some(unsafe { ptr::read(old) })
         }
@@ -271,7 +271,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
         } else {
-            self.end = unsafe { self.end.offset(-1) };
+            self.end = unsafe { self.end.sub(1) };

             Some(unsafe { ptr::read(self.end) })
         }
@@ -285,7 +285,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: same as for advance_by()
-            self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+            self.end = unsafe { self.end.sub(step_size) };
         }
         let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
         // SAFETY: same as for advance_by()

diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 1d8580fbece0c..2c26bb06e3f08 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -542,8 +542,8 @@ impl<T> Vec<T> {
     ///
     /// unsafe {
     ///     // Overwrite memory with 4, 5, 6
-    ///     for i in 0..len as isize {
-    ///         ptr::write(p.offset(i), 4 + i);
+    ///     for i in 0..len {
+    ///         ptr::write(p.add(i), 4 + i);
     ///     }
     ///
     ///     // Put everything back together into a Vec
@@ -702,8 +702,8 @@ impl<T, A: Allocator> Vec<T, A> {
     ///
     /// unsafe {
     ///     // Overwrite memory with 4, 5, 6
-    ///     for i in 0..len as isize {
-    ///         ptr::write(p.offset(i), 4 + i);
+    ///     for i in 0..len {
+    ///         ptr::write(p.add(i), 4 + i);
     ///     }
     ///
     ///     // Put everything back together into a Vec
@@ -1393,7 +1393,7 @@ impl<T, A: Allocator> Vec<T, A> {
         if index < len {
             // Shift everything over to make space. (Duplicating the
             // `index`th element into two consecutive places.)
-            ptr::copy(p, p.offset(1), len - index);
+            ptr::copy(p, p.add(1), len - index);
         } else if index == len {
             // No elements need shifting.
         } else {
@@ -1455,7 +1455,7 @@ impl<T, A: Allocator> Vec<T, A> {
                 ret = ptr::read(ptr);

                 // Shift everything down to fill in that spot.
-                ptr::copy(ptr.offset(1), ptr, len - index - 1);
+                ptr::copy(ptr.add(1), ptr, len - index - 1);
             }
             self.set_len(len - 1);
             ret
@@ -2408,7 +2408,7 @@ impl<T, A: Allocator> Vec<T, A> {
             // Write all elements except the last one
             for _ in 1..n {
                 ptr::write(ptr, value.next());
-                ptr = ptr.offset(1);
+                ptr = ptr.add(1);
                 // Increment the length in every step in case next() panics
                 local_len.increment_len(1);
             }

diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 506ee0ecfa279..1ea9c827afd70 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -39,7 +39,7 @@ where
             let mut local_len = SetLenOnDrop::new(&mut self.len);
             iterator.for_each(move |element| {
                 ptr::write(ptr, element);
-                ptr = ptr.offset(1);
+                ptr = ptr.add(1);
                 // Since the loop executes user code which can panic we have to bump the pointer
                 // after each step.
                 // NB can't overflow since we would have had to alloc the address space

diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 7379569dd68fe..e30329aa1cb6c 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
 fn test_as_ptr() {
     let buf = "hello".as_ptr();
     unsafe {
-        assert_eq!(*buf.offset(0), b'h');
-        assert_eq!(*buf.offset(1), b'e');
-        assert_eq!(*buf.offset(2), b'l');
-        assert_eq!(*buf.offset(3), b'l');
-        assert_eq!(*buf.offset(4), b'o');
+        assert_eq!(*buf.add(0), b'h');
+        assert_eq!(*buf.add(1), b'e');
+        assert_eq!(*buf.add(2), b'l');
+        assert_eq!(*buf.add(3), b'l');
+        assert_eq!(*buf.add(4), b'o');
     }
 }
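The doc examples fixed above read, runnable and in full, roughly like this (an editorial reconstruction, using `ManuallyDrop` to keep ownership straight):

    use std::mem::ManuallyDrop;
    use std::ptr;

    fn main() {
        let v = vec![1usize, 2, 3];
        let mut v = ManuallyDrop::new(v);
        let (p, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());

        unsafe {
            // Overwrite memory with 4, 5, 6; usize indices throughout.
            for i in 0..len {
                ptr::write(p.add(i), 4 + i);
            }
            let rebuilt = Vec::from_raw_parts(p, len, cap);
            assert_eq!(rebuilt, [4, 5, 6]);
        }
    }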
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs
index 25789d37c2696..b11ed6b0b42ab 100644
--- a/library/core/src/fmt/num.rs
+++ b/library/core/src/fmt/num.rs
@@ -211,7 +211,7 @@ macro_rules! impl_Display {
             fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                 // 2^128 is about 3*10^38, so 39 gives an extra byte of space
                 let mut buf = [MaybeUninit::<u8>::uninit(); 39];
-                let mut curr = buf.len() as isize;
+                let mut curr = buf.len();
                 let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
                 let lut_ptr = DEC_DIGITS_LUT.as_ptr();
@@ -228,7 +228,7 @@ macro_rules! impl_Display {
                     // eagerly decode 4 characters at a time
                     while n >= 10000 {
-                        let rem = (n % 10000) as isize;
+                        let rem = (n % 10000) as usize;
                         n /= 10000;

                         let d1 = (rem / 100) << 1;
                         let d2 = (rem % 100) << 1;
                         curr -= 4;

                         // We are allowed to copy to `buf_ptr[curr..curr + 3]` here since
                         // otherwise `curr < 0`. But then `n` was originally at least `10000^10`
                         // which is `10^40 > 2^128 > n`.
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
+                        ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
+                        ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(curr + 2), 2);
                     }

                     // if we reach here numbers are <= 9999, so at most 4 chars long
-                    let mut n = n as isize; // possibly reduce 64bit math
+                    let mut n = n as usize; // possibly reduce 64bit math

                     // decode 2 more chars, if > 2 chars
                     if n >= 100 {
                         let d1 = (n % 100) << 1;
                         n /= 100;
                         curr -= 2;
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+                        ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
                     }

                     // decode last 1 or 2 chars
                     if n < 10 {
                         curr -= 1;
-                        *buf_ptr.offset(curr) = (n as u8) + b'0';
+                        *buf_ptr.add(curr) = (n as u8) + b'0';
                     } else {
                         let d1 = n << 1;
                         curr -= 2;
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+                        ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
                     }
                 }

@@ -268,7 +268,7 @@ macro_rules! impl_Display {
                 // UTF-8 since `DEC_DIGITS_LUT` is
                 let buf_slice = unsafe {
                     str::from_utf8_unchecked(
-                        slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize))
+                        slice::from_raw_parts(buf_ptr.add(curr), buf.len() - curr))
                 };
                 f.pad_integral(is_nonnegative, "", buf_slice)
             }
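The indexing scheme in `impl_Display` after this change: `(n % 100) << 1` is at most 198 and now stays a `usize`, which `add` accepts directly. In miniature (a sketch using a 10-entry table instead of core's 100-entry `DEC_DIGITS_LUT`):

    fn main() {
        // Two ASCII digits per entry, here only "00".."09".
        let lut: &[u8] = b"00010203040506070809";
        let n: u32 = 7;

        let d1 = ((n % 10) << 1) as usize;
        unsafe {
            let p = lut.as_ptr().add(d1);
            assert_eq!([*p, *p.add(1)], *b"07");
        }
    }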
@@ -339,18 +339,18 @@ macro_rules! impl_Exp {
             // Since `curr` always decreases by the number of digits copied, this means
             // that `curr >= 0`.
             let mut buf = [MaybeUninit::<u8>::uninit(); 40];
-            let mut curr = buf.len() as isize; //index for buf
+            let mut curr = buf.len(); //index for buf
             let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
             let lut_ptr = DEC_DIGITS_LUT.as_ptr();

             // decode 2 chars at a time
             while n >= 100 {
-                let d1 = ((n % 100) as isize) << 1;
+                let d1 = ((n % 100) << 1) as usize;
                 curr -= 2;
                 // SAFETY: `d1 <= 198`, so we can copy from `lut_ptr[d1..d1 + 2]` since
                 // `DEC_DIGITS_LUT` has a length of 200.
                 unsafe {
-                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+                    ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
                 }
                 n /= 100;
                 exponent += 2;
@@ -362,7 +362,7 @@ macro_rules! impl_Exp {
                 curr -= 1;
                 // SAFETY: Safe since `40 > curr >= 0` (see comment)
                 unsafe {
-                    *buf_ptr.offset(curr) = (n as u8 % 10_u8) + b'0';
+                    *buf_ptr.add(curr) = (n as u8 % 10_u8) + b'0';
                 }
                 n /= 10;
                 exponent += 1;
@@ -372,7 +372,7 @@ macro_rules! impl_Exp {
                 curr -= 1;
                 // SAFETY: Safe since `40 > curr >= 0`
                 unsafe {
-                    *buf_ptr.offset(curr) = b'.';
+                    *buf_ptr.add(curr) = b'.';
                 }
             }

@@ -380,10 +380,10 @@ macro_rules! impl_Exp {
             let buf_slice = unsafe {
                 // decode last character
                 curr -= 1;
-                *buf_ptr.offset(curr) = (n as u8) + b'0';
+                *buf_ptr.add(curr) = (n as u8) + b'0';

                 let len = buf.len() - curr as usize;
-                slice::from_raw_parts(buf_ptr.offset(curr), len)
+                slice::from_raw_parts(buf_ptr.add(curr), len)
             };

             // stores 'e' (or 'E') and the up to 2-digit exponent
@@ -392,13 +392,13 @@ macro_rules! impl_Exp {
             // SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]`
             // is contained within `exp_buf` since `len <= 3`.
             let exp_slice = unsafe {
-                *exp_ptr.offset(0) = if upper { b'E' } else { b'e' };
+                *exp_ptr.add(0) = if upper { b'E' } else { b'e' };
                 let len = if exponent < 10 {
-                    *exp_ptr.offset(1) = (exponent as u8) + b'0';
+                    *exp_ptr.add(1) = (exponent as u8) + b'0';
                     2
                 } else {
                     let off = exponent << 1;
-                    ptr::copy_nonoverlapping(lut_ptr.offset(off), exp_ptr.offset(1), 2);
+                    ptr::copy_nonoverlapping(lut_ptr.add(off), exp_ptr.add(1), 2);
                     3
                 };
                 slice::from_raw_parts(exp_ptr, len)
@@ -479,7 +479,7 @@ mod imp {
 impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128);

 /// Helper function for writing a u64 into `buf` going from last to first, with `curr`.
-fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut isize) {
+fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut usize) {
     let buf_ptr = MaybeUninit::slice_as_mut_ptr(buf);
     let lut_ptr = DEC_DIGITS_LUT.as_ptr();
     assert!(*curr > 19);
@@ -494,41 +494,41 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
             n /= 1e16 as u64;

             // Some of these are nops but it looks more elegant this way.
-            let d1 = ((to_parse / 1e14 as u64) % 100) << 1;
-            let d2 = ((to_parse / 1e12 as u64) % 100) << 1;
-            let d3 = ((to_parse / 1e10 as u64) % 100) << 1;
-            let d4 = ((to_parse / 1e8 as u64) % 100) << 1;
-            let d5 = ((to_parse / 1e6 as u64) % 100) << 1;
-            let d6 = ((to_parse / 1e4 as u64) % 100) << 1;
-            let d7 = ((to_parse / 1e2 as u64) % 100) << 1;
-            let d8 = ((to_parse / 1e0 as u64) % 100) << 1;
+            let d1 = (((to_parse / 1e14 as u64) % 100) << 1) as usize;
+            let d2 = (((to_parse / 1e12 as u64) % 100) << 1) as usize;
+            let d3 = (((to_parse / 1e10 as u64) % 100) << 1) as usize;
+            let d4 = (((to_parse / 1e8 as u64) % 100) << 1) as usize;
+            let d5 = (((to_parse / 1e6 as u64) % 100) << 1) as usize;
+            let d6 = (((to_parse / 1e4 as u64) % 100) << 1) as usize;
+            let d7 = (((to_parse / 1e2 as u64) % 100) << 1) as usize;
+            let d8 = (((to_parse / 1e0 as u64) % 100) << 1) as usize;

             *curr -= 16;

-            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d5 as isize), buf_ptr.offset(*curr + 8), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d6 as isize), buf_ptr.offset(*curr + 10), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d7 as isize), buf_ptr.offset(*curr + 12), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d8 as isize), buf_ptr.offset(*curr + 14), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(*curr + 0), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(*curr + 2), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d3), buf_ptr.add(*curr + 4), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d4), buf_ptr.add(*curr + 6), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d5), buf_ptr.add(*curr + 8), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d6), buf_ptr.add(*curr + 10), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d7), buf_ptr.add(*curr + 12), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d8), buf_ptr.add(*curr + 14), 2);
         }
         if n >= 1e8 as u64 {
             let to_parse = n % 1e8 as u64;
             n /= 1e8 as u64;

             // Some of these are nops but it looks more elegant this way.
-            let d1 = ((to_parse / 1e6 as u64) % 100) << 1;
-            let d2 = ((to_parse / 1e4 as u64) % 100) << 1;
-            let d3 = ((to_parse / 1e2 as u64) % 100) << 1;
-            let d4 = ((to_parse / 1e0 as u64) % 100) << 1;
+            let d1 = (((to_parse / 1e6 as u64) % 100) << 1) as usize;
+            let d2 = (((to_parse / 1e4 as u64) % 100) << 1) as usize;
+            let d3 = (((to_parse / 1e2 as u64) % 100) << 1) as usize;
+            let d4 = (((to_parse / 1e0 as u64) % 100) << 1) as usize;

             *curr -= 8;

-            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(*curr + 0), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(*curr + 2), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d3), buf_ptr.add(*curr + 4), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d4), buf_ptr.add(*curr + 6), 2);
         }
         // `n` < 1e8 < (1 << 32)
         let mut n = n as u32;
@@ -536,31 +536,31 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
             let to_parse = n % 1e4 as u32;
             n /= 1e4 as u32;

-            let d1 = (to_parse / 100) << 1;
-            let d2 = (to_parse % 100) << 1;
+            let d1 = ((to_parse / 100) << 1) as usize;
+            let d2 = ((to_parse % 100) << 1) as usize;

             *curr -= 4;

-            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
-            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(*curr + 0), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(*curr + 2), 2);
         }

         // `n` < 1e4 < (1 << 16)
         let mut n = n as u16;
         if n >= 100 {
-            let d1 = (n % 100) << 1;
+            let d1 = ((n % 100) << 1) as usize;
             n /= 100;
             *curr -= 2;
-            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(*curr), 2);
         }

         // decode last 1 or 2 chars
         if n < 10 {
             *curr -= 1;
-            *buf_ptr.offset(*curr) = (n as u8) + b'0';
+            *buf_ptr.add(*curr) = (n as u8) + b'0';
         } else {
-            let d1 = n << 1;
+            let d1 = (n << 1) as usize;
             *curr -= 2;
-            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+            ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(*curr), 2);
         }
     }
 }
@@ -593,21 +593,21 @@ impl fmt::Display for i128 {
 fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
     // 2^128 is about 3*10^38, so 39 gives an extra byte of space
     let mut buf = [MaybeUninit::<u8>::uninit(); 39];
-    let mut curr = buf.len() as isize;
+    let mut curr = buf.len();

     let (n, rem) = udiv_1e19(n);
     parse_u64_into(rem, &mut buf, &mut curr);

     if n != 0 {
         // 0 pad up to point
-        let target = (buf.len() - 19) as isize;
+        let target = buf.len() - 19;
         // SAFETY: Guaranteed that we wrote at most 19 bytes, and there must be space
         // remaining since it has length 39
         unsafe {
             ptr::write_bytes(
-                MaybeUninit::slice_as_mut_ptr(&mut buf).offset(target),
+                MaybeUninit::slice_as_mut_ptr(&mut buf).add(target),
                 b'0',
-                (curr - target) as usize,
+                curr - target,
             );
         }
         curr = target;
@@ -616,16 +616,16 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R
         parse_u64_into(rem, &mut buf, &mut curr);
         // Should this following branch be annotated with unlikely?
         if n != 0 {
-            let target = (buf.len() - 38) as isize;
+            let target = buf.len() - 38;
             // The raw `buf_ptr` pointer is only valid until `buf` is used the next time,
             // but `buf` is not used in this scope so we are good.
             let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
             // SAFETY: At this point we wrote at most 38 bytes, pad up to that point,
             // There can only be at most 1 digit remaining.
             unsafe {
-                ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize);
+                ptr::write_bytes(buf_ptr.add(target), b'0', curr - target);
                 curr = target - 1;
-                *buf_ptr.offset(curr) = (n as u8) + b'0';
+                *buf_ptr.add(curr) = (n as u8) + b'0';
             }
         }
     }
@@ -634,8 +634,8 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R
     // UTF-8 since `DEC_DIGITS_LUT` is
     let buf_slice = unsafe {
         str::from_utf8_unchecked(slice::from_raw_parts(
-            MaybeUninit::slice_as_mut_ptr(&mut buf).offset(curr),
-            buf.len() - curr as usize,
+            MaybeUninit::slice_as_mut_ptr(&mut buf).add(curr),
+            buf.len() - curr,
         ))
     };
     f.pad_integral(is_nonnegative, "", buf_slice)

diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 5cae6da9a567b..5cac0cf94075d 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -2211,7 +2211,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
 /// unsafe {
 ///     // The call to offset is always safe because `Vec` will never
 ///     // allocate more than `isize::MAX` bytes.
-///     let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
+///     let dst_ptr = dst.as_mut_ptr().add(dst_len);
 ///     let src_ptr = src.as_ptr();
 ///
 ///     // Truncate `src` without dropping its contents. We do this first,

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index dffeaf6a834e7..d8a4fd6c1ce02 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -124,8 +124,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
         // SAFETY: offset starts at len - suffix.len(), as long as it is greater than
         // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes.
         unsafe {
-            let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk);
-            let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk);
+            let u = *(ptr.add(offset - 2 * chunk_bytes) as *const Chunk);
+            let v = *(ptr.add(offset - chunk_bytes) as *const Chunk);

             // Break if there is a matching byte.
             let zu = contains_zero_byte(u ^ repeated_x);

diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index e79d47c9f98cd..f98a279c02f4c 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -2921,7 +2921,7 @@ impl<T> [T] {
                 let prev_ptr_write = ptr.add(next_write - 1);
                 if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                     if next_read != next_write {
-                        let ptr_write = prev_ptr_write.offset(1);
+                        let ptr_write = prev_ptr_write.add(1);
                         mem::swap(&mut *ptr_read, &mut *ptr_write);
                     }
                     next_write += 1;

diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 6a201834b8e68..8b025da2a46ed 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -326,8 +326,8 @@ where
                 unsafe {
                     // Branchless comparison.
                     *end_l = i as u8;
-                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
-                    elem = elem.offset(1);
+                    end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+                    elem = elem.add(1);
                 }
             }
         }
@@ -352,9 +352,9 @@ where
             // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
             unsafe {
                 // Branchless comparison.
-                elem = elem.offset(-1);
+                elem = elem.sub(1);
                 *end_r = i as u8;
-                end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+                end_r = end_r.add(is_less(&*elem, pivot) as usize);
             }
         }
     }
@@ -365,12 +365,12 @@ where
         if count > 0 {
             macro_rules! left {
                 () => {
-                    l.offset(*start_l as isize)
+                    l.add(*start_l as usize)
                 };
             }
             macro_rules! right {
                 () => {
-                    r.offset(-(*start_r as isize) - 1)
+                    r.sub((*start_r as usize) + 1)
                 };
             }
@@ -398,16 +398,16 @@ where
                 ptr::copy_nonoverlapping(right!(), left!(), 1);

                 for _ in 1..count {
-                    start_l = start_l.offset(1);
+                    start_l = start_l.add(1);
                     ptr::copy_nonoverlapping(left!(), right!(), 1);
-                    start_r = start_r.offset(1);
+                    start_r = start_r.add(1);
                     ptr::copy_nonoverlapping(right!(), left!(), 1);
                 }

                 ptr::copy_nonoverlapping(&tmp, right!(), 1);
                 mem::forget(tmp);
-                start_l = start_l.offset(1);
-                start_r = start_r.offset(1);
+                start_l = start_l.add(1);
+                start_r = start_r.add(1);
             }
         }
@@ -420,7 +420,7 @@ where
             // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
            // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
             // for the smaller number of remaining elements.
-            l = unsafe { l.offset(block_l as isize) };
+            l = unsafe { l.add(block_l) };
         }

         if start_r == end_r {
             // All out-of-order elements in the right block were moved. Move to the previous block.

             // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
             // or `block_r` has been adjusted for the last handful of elements.
-            r = unsafe { r.offset(-(block_r as isize)) };
+            r = unsafe { r.sub(block_r) };
         }

         if is_done {
@@ -457,9 +457,9 @@ where
             // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
             //   the last block, so the `l.offset` calls are valid.
             unsafe {
-                end_l = end_l.offset(-1);
-                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
-                r = r.offset(-1);
+                end_l = end_l.sub(1);
+                ptr::swap(l.add(*end_l as usize), r.sub(1));
+                r = r.sub(1);
             }
         }
         width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
         while start_r < end_r {
             // SAFETY: See the reasoning in [remaining-elements-safety].
             unsafe {
-                end_r = end_r.offset(-1);
-                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
-                l = l.offset(1);
+                end_r = end_r.sub(1);
+                ptr::swap(l, r.sub((*end_r as usize) + 1));
+                l = l.add(1);
             }
         }
         width(v.as_mut_ptr(), l)
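The `sort.rs` hunks keep the branchless trick intact: the boolean comparison result, widened to `usize`, advances the write cursor by 0 or 1. Extracted into a standalone sketch (collecting indices of elements below a pivot; assumes inputs of at most 64 elements):

    fn indices_below(v: &[i32], pivot: i32) -> Vec<u8> {
        let mut offsets = [0u8; 64];
        let mut end = offsets.as_mut_ptr();
        let mut elem = v.as_ptr();
        unsafe {
            for i in 0..v.len().min(64) {
                *end = i as u8;
                // Branchless: the cursor moves only when the test holds.
                end = end.add((*elem < pivot) as usize);
                elem = elem.add(1);
            }
            let len = end.offset_from(offsets.as_ptr()) as usize;
            offsets[..len].to_vec()
        }
    }

    fn main() {
        assert_eq!(indices_below(&[3, 9, 1, 7], 5), [0, 2]);
    }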
diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs
index 04bc665233e38..2acef432f2063 100644
--- a/library/core/src/str/validations.rs
+++ b/library/core/src/str/validations.rs
@@ -216,12 +216,12 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
             // SAFETY: since `align - index` and `ascii_block_size` are
             // multiples of `usize_bytes`, `block = ptr.add(index)` is
             // always aligned with a `usize` so it's safe to dereference
-            // both `block` and `block.offset(1)`.
+            // both `block` and `block.add(1)`.
             unsafe {
                 let block = ptr.add(index) as *const usize;
                 // break if there is a nonascii byte
                 let zu = contains_nonascii(*block);
-                let zv = contains_nonascii(*block.offset(1));
+                let zv = contains_nonascii(*block.add(1));
                 if zu || zv {
                     break;
                 }

diff --git a/library/panic_abort/src/android.rs b/library/panic_abort/src/android.rs
index 18bb932f10ea9..0fd824f8a458d 100644
--- a/library/panic_abort/src/android.rs
+++ b/library/panic_abort/src/android.rs
@@ -42,7 +42,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
         return; // allocation failure
     }
     copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
-    buf.offset(msg.len() as isize).write(0);
+    buf.add(msg.len()).write(0);

     let func = transmute::<usize, SetAbortMessageType>(func_addr);
     func(buf);

diff --git a/library/panic_unwind/src/dwarf/eh.rs b/library/panic_unwind/src/dwarf/eh.rs
index 7394feab82f22..9aa966b5063b1 100644
--- a/library/panic_unwind/src/dwarf/eh.rs
+++ b/library/panic_unwind/src/dwarf/eh.rs
@@ -75,7 +75,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
     let call_site_encoding = reader.read::<u8>();
     let call_site_table_length = reader.read_uleb128();
-    let action_table = reader.ptr.offset(call_site_table_length as isize);
+    let action_table = reader.ptr.add(call_site_table_length as usize);
     let ip = context.ip;

     if !USING_SJLJ_EXCEPTIONS {

diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 9aeae4b2cae69..bb313c7597b6c 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -329,7 +329,7 @@ impl SocketAddr {
             crate::ptr::copy_nonoverlapping(
                 namespace.as_ptr(),
-                addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
+                addr.sun_path.as_mut_ptr().add(1) as *mut u8,
                 namespace.len(),
             );
             let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;

diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index a2c8ab7f38d54..a8cfa1d25d081 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -334,8 +334,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
                 mfence
                 lfence
                 ",
-                val = in(reg_byte) *src.offset(off as isize),
-                dst = in(reg) dst.offset(off as isize),
+                val = in(reg_byte) *src.add(off),
+                dst = in(reg) dst.add(off),
                 seg_sel = in(reg) &mut seg_sel,
                 options(nostack, att_syntax)
             );
@@ -359,8 +359,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     assert!(is_enclave_range(src, len));
     assert!(is_user_range(dst, len));
     assert!(len < isize::MAX as usize);
-    assert!(!(src as usize).overflowing_add(len).1);
-    assert!(!(dst as usize).overflowing_add(len).1);
+    assert!(!src.addr().overflowing_add(len).1);
+    assert!(!dst.addr().overflowing_add(len).1);

     if len < 8 {
         // Can't align on 8 byte boundary: copy safely byte per byte
@@ -384,22 +384,22 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         unsafe {
             // Copy small0
-            let small0_size = (8 - dst as usize % 8) as u8;
+            let small0_size = (8 - dst.addr() % 8) as u8 as usize;
             let small0_src = src;
             let small0_dst = dst;
-            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+            copy_bytewise_to_userspace(small0_src, small0_dst, small0_size);

             // Copy big
-            let small1_size = ((len - small0_size as usize) % 8) as u8;
-            let big_size = len - small0_size as usize - small1_size as usize;
-            let big_src = src.offset(small0_size as _);
-            let big_dst = dst.offset(small0_size as _);
-            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+            let small1_size = ((len - small0_size) % 8) as u8 as usize;
+            let big_size = len - small0_size - small1_size;
+            let big_src = src.add(small0_size);
+            let big_dst = dst.add(small0_size);
+            copy_aligned_quadwords_to_userspace(big_src, big_dst, big_size);

             // Copy small1
-            let small1_src = src.offset(big_size as isize + small0_size as isize);
-            let small1_dst = dst.offset(big_size as isize + small0_size as isize);
-            copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+            let small1_src = src.add(big_size + small0_size);
+            let small1_dst = dst.add(big_size + small0_size);
+            copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
         }
     }
 }

diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
index cbf7d7d54f7a2..598ad5a083ccd 100644
--- a/library/std/src/sys/sgx/abi/usercalls/tests.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -17,12 +17,12 @@ fn test_copy_function() {
         dst.copy_from_enclave(&[0u8; 100]);

         // Copy src[0..size] to dst + offset
-        unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+        unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };

         // Verify copy
         for byte in 0..size {
             unsafe {
-                assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+                assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
             }
         }
     }

diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs
index fdc81cdea7dec..fe00c08aa6a9a 100644
--- a/library/std/src/sys/windows/alloc.rs
+++ b/library/std/src/sys/windows/alloc.rs
@@ -168,7 +168,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
         // SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
         // is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
         // it, it is safe to write a header directly before it.
-        unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+        unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };

         // SAFETY: The returned pointer does not point to the start of an allocated block,
         // but there is a header readable directly before it containing the location of the start
@@ -213,7 +213,7 @@ unsafe impl GlobalAlloc for System {
                 // SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
                 // and have a header readable directly before it.
-                unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+                unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
             }
         };
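The Windows allocator pattern above, a `Header` stored one slot before the returned pointer and recovered with `sub(1)`, in miniature; this sketch allocates manually and skips the alignment games and error handling of the real code:

    use std::alloc::{alloc, dealloc, Layout};

    fn main() {
        unsafe {
            let layout = Layout::array::<usize>(2).unwrap();
            let base = alloc(layout) as *mut usize;
            assert!(!base.is_null());

            let user = base.add(1);   // pointer handed to the "user"
            base.write(0xDEAD_BEEF);  // the header lives just before it
            assert_eq!(user.sub(1).read(), 0xDEAD_BEEF);

            dealloc(base as *mut u8, layout);
        }
    }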
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index aed082b3e0abf..1361b9c90c021 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -512,7 +512,7 @@ impl File {
                     ));
                 }
             };
-            let subst_ptr = path_buffer.offset(subst_off as isize);
+            let subst_ptr = path_buffer.add(subst_off.into());
             let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
             // Absolute paths start with an NT internal namespace prefix `\??\`
             // We should not let it leak through.
@@ -1345,10 +1345,10 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
         let v = br"\??\";
         let v = v.iter().map(|x| *x as u16);
         for c in v.chain(original.as_os_str().encode_wide()) {
-            *buf.offset(i) = c;
+            *buf.add(i) = c;
             i += 1;
         }
-        *buf.offset(i) = 0;
+        *buf.add(i) = 0;
         i += 1;
         (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
         (*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;

diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index bcac996c024ec..352337ba32237 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -99,11 +99,11 @@ impl Iterator for Env {
             }
             let p = self.cur as *const u16;
             let mut len = 0;
-            while *p.offset(len) != 0 {
+            while *p.add(len) != 0 {
                 len += 1;
             }
-            let s = slice::from_raw_parts(p, len as usize);
-            self.cur = self.cur.offset(len + 1);
+            let s = slice::from_raw_parts(p, len);
+            self.cur = self.cur.add(len + 1);

             // Windows allows environment variables to start with an equals
             // symbol (in any other position, this is the separator between

From e055e600e24bba12ffb7cd54ee194767c92609ba Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:33:46 +0400
Subject: [PATCH 5/5] Make some docs nicer wrt pointer offsets
---
 library/alloc/src/ffi/c_str.rs  | 6 +++---
 library/core/src/slice/mod.rs   | 2 +-
 library/core/src/sync/atomic.rs | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index ae61b1f1e8ed5..be21d8c722d78 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -436,9 +436,9 @@ impl CString {
     ///
     /// unsafe {
     ///     assert_eq!(b'f', *ptr as u8);
-    ///     assert_eq!(b'o', *ptr.offset(1) as u8);
-    ///     assert_eq!(b'o', *ptr.offset(2) as u8);
-    ///     assert_eq!(b'\0', *ptr.offset(3) as u8);
+    ///     assert_eq!(b'o', *ptr.add(1) as u8);
+    ///     assert_eq!(b'o', *ptr.add(2) as u8);
+    ///     assert_eq!(b'\0', *ptr.add(3) as u8);
     ///
     ///     // retake pointer to free memory
     ///     let _ = CString::from_raw(ptr);

diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index f98a279c02f4c..571d33a94d387 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -2905,7 +2905,7 @@ impl<T> [T] {
         // `prev_ptr_write` is never less than 0 and is inside the slice.
         // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
         // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
-        // and `prev_ptr_write.offset(1)`.
+        // and `prev_ptr_write.add(1)`.
         //
         // `next_write` is also incremented at most once per loop at most meaning
         // no element is skipped when it may need to be swapped.
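The `CString` doc change above, assembled into a runnable form (editorial; it follows the documented `into_raw`/`from_raw` contract):

    use std::ffi::CString;

    fn main() {
        let ptr = CString::new("foo").unwrap().into_raw();
        unsafe {
            assert_eq!(b'f', *ptr as u8);
            assert_eq!(b'o', *ptr.add(1) as u8);
            assert_eq!(b'o', *ptr.add(2) as u8);
            assert_eq!(b'\0', *ptr.add(3) as u8);
            // Retake the pointer so the allocation is freed.
            let _ = CString::from_raw(ptr);
        }
    }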
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 40ca9abd6bdc9..21ac9c5c7e3c1 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1555,7 +1555,7 @@ impl<T> AtomicPtr<T> {
     /// previous pointer.
     ///
     /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
-    /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+    /// perform `ptr = ptr.wrapping_byte_add(val)`.
     ///
     /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
@@ -1592,7 +1592,7 @@ impl<T> AtomicPtr<T> {
     /// previous pointer.
     ///
     /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
-    /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+    /// perform `ptr = ptr.wrapping_byte_sub(val)`.
     ///
     /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
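For reference, the operation these `fetch_byte_add`/`fetch_byte_sub` docs now describe, spelled with the non-atomic wrapping ops (the atomic methods themselves were still gated behind `strict_provenance_atomic_ptr` when this series landed, so this sketch substitutes a load/store pair):

    use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

    fn main() {
        let mut data = 0i64;
        let n: *mut i64 = &mut data;
        let atom = AtomicPtr::new(n);

        // Non-atomic spelling of what `atom.fetch_byte_add(2, SeqCst)` does:
        let old = atom.load(SeqCst);
        atom.store(old.wrapping_byte_add(2), SeqCst);

        assert_eq!(atom.load(SeqCst), n.wrapping_byte_add(2));
    }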