Constify copy related functions #83091

Merged · 4 commits · Mar 16, 2021
Changes from 1 commit:
Constify mem::swap and ptr::swap[_nonoverlapping]
usbalbin committed Mar 15, 2021
commit 64e2248794caada12469a0bd31d883eb6370b095
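This commit makes `mem::swap`, `ptr::swap`, and `ptr::swap_nonoverlapping` callable in const contexts behind the `const_swap` feature gate (tracking issue #83163). A minimal sketch of what this enables on nightly — a hypothetical example, not code from the PR; note that taking `&mut` inside a const fn was itself still gated by `const_mut_refs` at this point:

```rust
#![feature(const_swap)]
#![feature(const_mut_refs)] // &mut in const fns is still unstable here

use core::mem;

// Illustrative only: order a pair of values at compile time.
const fn sorted_pair(mut a: i32, mut b: i32) -> (i32, i32) {
    if a > b {
        mem::swap(&mut a, &mut b);
    }
    (a, b)
}

const PAIR: (i32, i32) = sorted_pair(3, 1);

fn main() {
    assert_eq!(PAIR, (1, 3)); // evaluated during compilation
}
```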
12 changes: 0 additions & 12 deletions library/core/src/intrinsics.rs
@@ -1902,18 +1902,6 @@ pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
     !ptr.is_null() && ptr as usize % mem::align_of::<T>() == 0
 }
 
-/// Checks whether the regions of memory starting at `src` and `dst` of size
-/// `count * size_of::<T>()` do *not* overlap.
-pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
-    let src_usize = src as usize;
-    let dst_usize = dst as usize;
-    let size = mem::size_of::<T>().checked_mul(count).unwrap();
-    let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
-    // If the absolute distance between the ptrs is at least as big as the size of the buffer,
-    // they do not overlap.
-    diff >= size
-}
-
 /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
 /// `val`.
 ///
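This helper is deleted because it casts pointers to `usize`, which const evaluation cannot do; the debug-only check in `swap_nonoverlapping` that used it is dropped in the `ptr/mod.rs` hunk below. The check itself is plain interval arithmetic: two regions of `len` bytes are disjoint exactly when their start addresses are at least `len` apart. A standalone restatement over raw addresses (names are illustrative, not library code):

```rust
// Re-statement of the removed check over plain addresses.
fn regions_disjoint(src: usize, dst: usize, len: usize) -> bool {
    let diff = if src > dst { src - dst } else { dst - src };
    diff >= len
}

fn main() {
    assert!(regions_disjoint(0x1000, 0x1040, 64));  // adjacent regions: disjoint
    assert!(!regions_disjoint(0x1000, 0x1020, 64)); // second starts inside the first
}
```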
1 change: 1 addition & 0 deletions library/core/src/lib.rs
@@ -98,6 +98,7 @@
 #![feature(const_slice_from_raw_parts)]
 #![feature(const_slice_ptr_len)]
 #![feature(const_size_of_val)]
+#![feature(const_swap)]
 #![feature(const_align_of_val)]
 #![feature(const_type_id)]
 #![feature(const_type_name)]
3 changes: 2 additions & 1 deletion library/core/src/mem/mod.rs
@@ -682,7 +682,8 @@ pub unsafe fn uninitialized<T>() -> T {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub fn swap<T>(x: &mut T, y: &mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const fn swap<T>(x: &mut T, y: &mut T) {
     // SAFETY: the raw pointers have been created from safe mutable references satisfying all the
     // constraints on `ptr::swap_nonoverlapping_one`
     unsafe {
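`#[rustc_const_unstable]` leaves runtime callers untouched: only use in const contexts requires nightly plus the feature gate. A quick illustration of the split (assumed behavior of the gating, not code from the PR):

```rust
use std::mem;

// Stable on every channel: a runtime call to mem::swap.
fn runtime_swap() {
    let (mut a, mut b) = (1, 2);
    mem::swap(&mut a, &mut b);
    assert_eq!((a, b), (2, 1));
}

// Const use is what this commit unlocks, but only on nightly with
// #![feature(const_swap)] until tracking issue #83163 is resolved.
fn main() {
    runtime_swap();
}
```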
23 changes: 9 additions & 14 deletions library/core/src/ptr/mod.rs
Original file line number Diff line number Diff line change
@@ -67,7 +67,7 @@
 use crate::cmp::Ordering;
 use crate::fmt;
 use crate::hash;
-use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::intrinsics::{self, abort, is_aligned_and_not_null};
 use crate::mem::{self, MaybeUninit};
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -394,7 +394,8 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
     // Give ourselves some scratch space to work with.
     // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
     let mut tmp = MaybeUninit::<T>::uninit();
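The body (truncated in the hunk above) swaps through a `MaybeUninit` scratch slot so that `x` and `y` are allowed to overlap. Restated outside the library as a sketch under the same safety contract (illustrative name, not the actual source):

```rust
use std::mem::MaybeUninit;
use std::ptr;

/// Sketch of the tmp-based swap: copy *x aside, move *y into *x,
/// then move the saved value into *y. `ptr::copy` tolerates overlap.
unsafe fn swap_sketch<T>(x: *mut T, y: *mut T) {
    let mut tmp = MaybeUninit::<T>::uninit();
    unsafe {
        ptr::copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); // tmp = *x
        ptr::copy(y, x, 1);                               // *x = *y (overlap-safe)
        ptr::copy_nonoverlapping(tmp.as_ptr(), y, 1);     // *y = tmp
    }
}
```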
@@ -451,16 +452,8 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
 /// ```
 #[inline]
 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
-pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
-    if cfg!(debug_assertions)
-        && !(is_aligned_and_not_null(x)
-            && is_aligned_and_not_null(y)
-            && is_nonoverlapping(x, y, count))
-    {
-        // Not panicking to keep codegen impact smaller.
-        abort();
-    }
-
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
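Note the trade-off in this hunk: the `cfg!(debug_assertions)` alignment and overlap check is dropped entirely rather than made const-compatible, since inspecting pointer addresses is not possible during const evaluation. Runtime usage is unchanged; for example:

```rust
use std::ptr;

fn main() {
    let mut a = [1u8, 2, 3, 4];
    let mut b = [9u8, 8, 7, 6];
    // SAFETY: two distinct stack arrays are valid, aligned, and
    // non-overlapping for all 4 elements.
    unsafe { ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 4) };
    assert_eq!(a, [9, 8, 7, 6]);
    assert_eq!(b, [1, 2, 3, 4]);
}
```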
@@ -470,7 +463,8 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
 }
 
 #[inline]
-pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
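The `< 32` bound matches the 32-byte block used by `swap_nonoverlapping_bytes` below: types smaller than one block take a direct read/write path instead. A sketch of that dispatch (illustrative, not the exact source):

```rust
use std::{mem, ptr};

/// Sketch: small types swap through a single read/write round-trip,
/// larger ones fall back to the block-wise byte swap.
unsafe fn swap_one_sketch<T>(x: *mut T, y: *mut T) {
    unsafe {
        if mem::size_of::<T>() < 32 {
            let tmp = ptr::read(x);
            ptr::copy_nonoverlapping(y, x, 1); // *x = *y
            ptr::write(y, tmp);                // *y = old *x
        } else {
            ptr::swap_nonoverlapping(x, y, 1);
        }
    }
}
```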
@@ -488,7 +482,8 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
 }
 
 #[inline]
-unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
     // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
     // Haswell E processors. LLVM is more able to optimize if we give a struct a
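The comment (truncated in the hunk above) describes the strategy: swap in fixed-size blocks so LLVM can lower the copies to SIMD loads and stores, then handle the tail byte by byte. A safe, simplified restatement over slices — the real code works on raw pointers with a 32-byte `Block` struct, and the name here is illustrative:

```rust
// Simplified illustration of chunked byte swapping.
fn swap_bytes_chunked(x: &mut [u8], y: &mut [u8]) {
    const BLOCK: usize = 32;
    assert_eq!(x.len(), y.len());
    let mut i = 0;
    // Swap one 32-byte block at a time through a temporary buffer.
    while i + BLOCK <= x.len() {
        let mut tmp = [0u8; BLOCK];
        tmp.copy_from_slice(&x[i..i + BLOCK]);
        x[i..i + BLOCK].copy_from_slice(&y[i..i + BLOCK]);
        y[i..i + BLOCK].copy_from_slice(&tmp);
        i += BLOCK;
    }
    // Tail: swap any remaining bytes individually.
    while i < x.len() {
        let t = x[i];
        x[i] = y[i];
        y[i] = t;
        i += 1;
    }
}
```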