lz4_flex/fastcpy_unsafe.rs
//! # FastCpy
//!
//! The Rust compiler calls `memcpy` for slices of unknown length.
//! This module provides a faster implementation of `memcpy` for slices of up to 32 bytes (64 bytes with `avx`).
//! If you know most of your copy operations are not too big, you can use `fastcpy` to speed up your program.
//!
//! `fastcpy` is designed to compile to very little assembly, so the overhead is low.
//!
//! As a fallback, the standard `memcpy` is called.
//!
//! ## Double Copy Trick
//! `fastcpy` employs a double copy trick to copy slices of length 4-32 bytes (64 bytes with `avx`).
//! E.g. a slice of length 6 can be copied with two unconditional copy operations:
//!
//! [1, 2, 3, 4, 5, 6]
//! [1, 2, 3, 4]
//!       [3, 4, 5, 6]
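//!
//! A minimal usage sketch (both buffers must be valid for `num_bytes` bytes
//! and must not overlap):
//!
//! ```ignore
//! let src = [1u8, 2, 3, 4, 5, 6];
//! let mut dst = [0u8; 6];
//! slice_copy(src.as_ptr(), dst.as_mut_ptr(), dst.len());
//! assert_eq!(src, dst);
//! ```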
//!
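/// Copies `num_bytes` bytes from `src` to `dst`, choosing a strategy based on
/// the length.
///
/// Callers must ensure that both pointers are valid for `num_bytes` bytes and
/// that `num_bytes >= 1`: the short-copy path reads and writes one byte
/// unconditionally, so `num_bytes == 0` would touch memory out of bounds.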
#[inline]
pub fn slice_copy(src: *const u8, dst: *mut u8, num_bytes: usize) {
    if num_bytes < 4 {
        short_copy(src, dst, num_bytes);
        return;
    }

    if num_bytes < 8 {
        double_copy_trick::<4>(src, dst, num_bytes);
        return;
    }

    if num_bytes <= 16 {
        double_copy_trick::<8>(src, dst, num_bytes);
        return;
    }

    //if num_bytes <= 32 {
    //double_copy_trick::<16>(src, dst, num_bytes);
    //return;
    //}

    // The code below would use the vmovdqu instruction to copy 32 bytes at a time.
    //#[cfg(target_feature = "avx")]
    //{
    //if num_bytes <= 64 {
    //double_copy_trick::<32>(src, dst, num_bytes);
    //return;
    //}
    //}

    // For larger sizes, a wild copy with a 16-byte step is used instead of the
    // default, which would call memcpy. memcpy does some virtual memory tricks
    // to copy large chunks of memory; the theory is that the checks above
    // don't cost much relative to the copy call for larger copies.
    //unsafe { core::ptr::copy_nonoverlapping(src, dst, num_bytes) }
    wild_copy_from_src::<16>(src, dst, num_bytes)
}

// Inline never because otherwise we get a call to memcpy -.-
#[inline]
fn wild_copy_from_src<const SIZE: usize>(
    mut source: *const u8,
    mut dst: *mut u8,
    num_bytes: usize,
) {
    // Note: if the compiler auto-vectorizes this, it'll hurt performance!
    // That doesn't happen with a 16-byte step size, but it does with 8 bytes.
    let l_last = unsafe { source.add(num_bytes - SIZE) };
    let r_last = unsafe { dst.add(num_bytes - SIZE) };
    // Round down to a multiple of SIZE; the tail copy below covers the rest.
    let num_bytes = (num_bytes / SIZE) * SIZE;
    unsafe {
        let dst_ptr_end = dst.add(num_bytes);
        loop {
            core::ptr::copy_nonoverlapping(source, dst, SIZE);
            source = source.add(SIZE);
            dst = dst.add(SIZE);
            if dst >= dst_ptr_end {
                break;
            }
        }
    }
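    // Tail copy: `l_last`/`r_last` point exactly SIZE bytes before the end of
    // each buffer, so this final block covers whatever the rounding above cut
    // off, overlapping the loop's last iteration when num_bytes is not a
    // multiple of SIZE. E.g. with num_bytes = 25 and SIZE = 16, the loop
    // copies bytes 0..16 and this copies bytes 9..25.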
    unsafe {
        core::ptr::copy_nonoverlapping(l_last, r_last, SIZE);
    }
}

#[inline]
fn short_copy(src: *const u8, dst: *mut u8, len: usize) {
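    // len is at most 3 here: copy the first byte unconditionally (callers
    // guarantee len >= 1), then cover lengths 2 and 3 with an overlapping
    // two-byte double copy.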
    unsafe {
        *dst = *src;
    }
    if len >= 2 {
        double_copy_trick::<2>(src, dst, len);
    }
}

/// Copies `len` bytes using two unconditional, possibly overlapping copies of
/// `SIZE` bytes each. Requires `SIZE <= len <= 2 * SIZE`.
///
/// [1, 2, 3, 4, 5, 6]
/// [1, 2, 3, 4]
///       [3, 4, 5, 6]
#[inline(always)]
fn double_copy_trick<const SIZE: usize>(src: *const u8, dst: *mut u8, len: usize) {
    let l_end = unsafe { src.add(len - SIZE) };
    let r_end = unsafe { dst.add(len - SIZE) };
    unsafe {
        core::ptr::copy_nonoverlapping(src, dst, SIZE);
        core::ptr::copy_nonoverlapping(l_end, r_end, SIZE);
    }
}

#[cfg(test)]
mod tests {
    use super::slice_copy;
    use proptest::prelude::*;

    proptest! {
        #[test]
        fn test_fast_short_slice_copy(left: Vec<u8>) {
            if left.is_empty() {
                return Ok(());
            }
            let mut right = vec![0u8; left.len()];
            slice_copy(left.as_ptr(), right.as_mut_ptr(), left.len());
            prop_assert_eq!(&left, &right);
        }
    }

    #[test]
    fn test_fast_short_slice_copy_edge_cases() {
        for len in 1..(512 * 2) {
            let left = (0..len).map(|i| i as u8).collect::<Vec<_>>();
            let mut right = vec![0u8; len];
            slice_copy(left.as_ptr(), right.as_mut_ptr(), left.len());
            assert_eq!(left, right);
        }
    }

    #[test]
    fn test_fail2() {
        let left = vec![
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31, 32,
        ];
        let mut right = vec![0u8; left.len()];
        slice_copy(left.as_ptr(), right.as_mut_ptr(), left.len());
        assert_eq!(left, right);
    }

    #[test]
    fn test_fail() {
        let left = vec![
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        let mut right = vec![0u8; left.len()];
        slice_copy(left.as_ptr(), right.as_mut_ptr(), left.len());
        assert_eq!(left, right);
    }
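
    // Additional check: exercises the exact lengths where `slice_copy`
    // switches strategies (short copy, 4- and 8-byte double copies, wild
    // copy), including both sides of each boundary.
    #[test]
    fn test_size_class_boundaries() {
        for &len in &[1usize, 2, 3, 4, 7, 8, 16, 17, 32, 33] {
            let left = (0..len).map(|i| i as u8).collect::<Vec<_>>();
            let mut right = vec![0u8; len];
            slice_copy(left.as_ptr(), right.as_mut_ptr(), len);
            assert_eq!(left, right);
        }
    }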
}