Auto merge of #46713 - Manishearth:memchr, r=bluss
Use memchr to speed up [u8]::contains 3x

bors committed Dec 31, 2017
2 parents 885011e + 4ef6847 commit 8c59418
Showing 9 changed files with 265 additions and 233 deletions.
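For context, the user-facing call this commit accelerates is `contains` on byte slices: it previously walked the slice one element at a time and now dispatches to the word-at-a-time `memchr` added below. A minimal illustration of the affected API (not part of the diff):

fn main() {
    let haystack: &[u8] = b"the quick brown fox";
    // `[u8]::contains` now routes through `core::slice::memchr` internally.
    assert!(haystack.contains(&b'q'));
    assert!(!haystack.contains(&b'!'));
}
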
230 changes: 230 additions & 0 deletions src/libcore/slice/memchr.rs
@@ -0,0 +1,230 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Original implementation taken from rust-memchr
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

use cmp;
use mem;

const LO_U64: u64 = 0x0101010101010101;
const HI_U64: u64 = 0x8080808080808080;

// use truncation
const LO_USIZE: usize = LO_U64 as usize;
const HI_USIZE: usize = HI_U64 as usize;

/// Return `true` if `x` contains any zero byte.
///
/// From *Matters Computational*, J. Arndt
///
/// "The idea is to subtract one from each of the bytes and then look for
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
fn contains_zero_byte(x: usize) -> bool {
    x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
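
// Illustrative example, not part of the original commit: the borrow trick is
// exact, so a word containing a zero byte is detected and a word without one
// is not, even when high bits (like the 0x88 byte here) are set.
#[cfg(all(test, target_pointer_width = "64"))]
#[test]
fn contains_zero_byte_example() {
    assert!(contains_zero_byte(0x1122_0044_5566_7788)); // third byte is 0x00
    assert!(!contains_zero_byte(0x1122_3344_5566_7788)); // no zero byte
}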

#[cfg(target_pointer_width = "16")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    (b as usize) << 8 | b as usize
}

#[cfg(target_pointer_width = "32")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    let mut rep = (b as usize) << 8 | b as usize;
    rep = rep << 16 | rep;
    rep
}

#[cfg(target_pointer_width = "64")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    let mut rep = (b as usize) << 8 | b as usize;
    rep = rep << 16 | rep;
    rep = rep << 32 | rep;
    rep
}
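
// Illustrative example, not part of the original commit: `repeat_byte`
// broadcasts one byte to every byte of a word; XOR-ing a word of text with
// the result zeroes exactly the bytes equal to the needle, which
// `contains_zero_byte` then detects.
#[cfg(all(test, target_pointer_width = "64"))]
#[test]
fn repeat_byte_example() {
    assert_eq!(repeat_byte(0x61), 0x6161_6161_6161_6161);
    assert!(contains_zero_byte(0x1161_2261_3361_4461 ^ repeat_byte(0x61)));
}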

/// Return the first index matching the byte `x` in `text`.
pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts
    // - unaligned initial part, before the first word aligned address in text
    // - body, scan by 2 words at a time
    // - the last remaining part, < 2 word size
    let len = text.len();
    let ptr = text.as_ptr();
    let usize_bytes = mem::size_of::<usize>();

    // search up to an aligned boundary
    let mut offset = ptr.align_offset(usize_bytes);
    if offset > 0 {
        offset = cmp::min(offset, len);
        if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
            return Some(index);
        }
    }

    // search the body of the text
    let repeated_x = repeat_byte(x);

    if len >= 2 * usize_bytes {
        while offset <= len - 2 * usize_bytes {
            unsafe {
                let u = *(ptr.offset(offset as isize) as *const usize);
                let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);

                // break if there is a matching byte
                let zu = contains_zero_byte(u ^ repeated_x);
                let zv = contains_zero_byte(v ^ repeated_x);
                if zu || zv {
                    break;
                }
            }
            offset += usize_bytes * 2;
        }
    }

    // find the byte after the point the body loop stopped
    text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
}
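
// Illustrative cross-check, not part of the original commit: the forward
// word-at-a-time search must agree with a naive scan at every starting
// alignment, mirroring `each_alignment_reversed` below.
#[test]
fn memchr_each_alignment() {
    let mut data = [1u8; 64];
    let needle = 2;
    let pos = 40;
    data[pos] = needle;
    for start in 0..16 {
        assert_eq!(Some(pos - start), memchr(needle, &data[start..]));
    }
}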

/// Return the last index matching the byte `x` in `text`.
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts
    // - unaligned tail, after the last word aligned address in text
    // - body, scan by 2 words at a time
    // - the first remaining bytes, < 2 word size
    let len = text.len();
    let ptr = text.as_ptr();
    let usize_bytes = mem::size_of::<usize>();

    // search to an aligned boundary
    let end_align = (ptr as usize + len) & (usize_bytes - 1);
    let mut offset;
    if end_align > 0 {
        offset = if end_align >= len { 0 } else { len - end_align };
        if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
            return Some(offset + index);
        }
    } else {
        offset = len;
    }

    // search the body of the text
    let repeated_x = repeat_byte(x);

    while offset >= 2 * usize_bytes {
        unsafe {
            let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize);
            let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize);

            // break if there is a matching byte
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset -= 2 * usize_bytes;
    }

    // find the byte before the point the body loop stopped
    text[..offset].iter().rposition(|elt| *elt == x)
}
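
// Illustrative cross-check, not part of the original commit: memrchr must
// agree with a naive reverse scan wherever the match falls relative to the
// word-aligned body (63 is deliberately not a multiple of the word size).
#[test]
fn memrchr_matches_naive_scan() {
    let mut data = [7u8; 63];
    for pos in 0..63 {
        data[pos] = 9;
        assert_eq!(data.iter().rposition(|&b| b == 9), memrchr(9, &data));
        data[pos] = 7;
    }
}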

// test fallback implementations on all platforms
#[test]
fn matches_one() {
    assert_eq!(Some(0), memchr(b'a', b"a"));
}

#[test]
fn matches_begin() {
    assert_eq!(Some(0), memchr(b'a', b"aaaa"));
}

#[test]
fn matches_end() {
    assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
}

#[test]
fn matches_nul() {
    assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
}

#[test]
fn matches_past_nul() {
    assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
}

#[test]
fn no_match_empty() {
    assert_eq!(None, memchr(b'a', b""));
}

#[test]
fn no_match() {
    assert_eq!(None, memchr(b'a', b"xyz"));
}

#[test]
fn matches_one_reversed() {
    assert_eq!(Some(0), memrchr(b'a', b"a"));
}

#[test]
fn matches_begin_reversed() {
    assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
}

#[test]
fn matches_end_reversed() {
    assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
}

#[test]
fn matches_nul_reversed() {
    assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
}

#[test]
fn matches_past_nul_reversed() {
    assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
}

#[test]
fn no_match_empty_reversed() {
    assert_eq!(None, memrchr(b'a', b""));
}

#[test]
fn no_match_reversed() {
    assert_eq!(None, memrchr(b'a', b"xyz"));
}

#[test]
fn each_alignment_reversed() {
    let mut data = [1u8; 64];
    let needle = 2;
    let pos = 40;
    data[pos] = needle;
    for start in 0..16 {
        assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
    }
}
31 changes: 30 additions & 1 deletion src/libcore/slice/mod.rs
@@ -50,6 +50,11 @@ use mem;
use marker::{Copy, Send, Sync, Sized, self};
use iter_private::TrustedRandomAccess;

#[unstable(feature = "slice_internals", issue = "0",
           reason = "exposed from core to be reused in std; use the memchr crate")]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;

mod rotate;
mod sort;

@@ -619,7 +624,7 @@ impl<T> SliceExt for [T] {

    #[inline]
    fn contains(&self, x: &T) -> bool where T: PartialEq {
-        self.iter().any(|elt| *x == *elt)
+        x.slice_contains(self)
    }

    #[inline]
@@ -2614,3 +2619,27 @@ unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
    }
    fn may_have_side_effect() -> bool { false }
}

trait SliceContains: Sized {
    fn slice_contains(&self, x: &[Self]) -> bool;
}

impl<T> SliceContains for T where T: PartialEq {
    default fn slice_contains(&self, x: &[Self]) -> bool {
        x.iter().any(|y| *y == *self)
    }
}

impl SliceContains for u8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}

impl SliceContains for i8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}
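
The `default fn` above is impl specialization (an unstable language feature the standard library may use internally): every `T: PartialEq` gets the generic linear scan, while `u8` and `i8` override it with the `memchr` fast path, and `contains` picks the right impl at compile time. A standalone sketch of the same dispatch pattern, with hypothetical trait and method names, requiring a nightly compiler:

#![feature(specialization)]

trait Describe {
    fn describe(&self) -> &'static str;
}

// Blanket impl: `default` marks the method as overridable by more
// specific impls.
impl<T> Describe for T {
    default fn describe(&self) -> &'static str { "generic slow path" }
}

// Specialized impl, selected whenever the concrete type is `u8`.
impl Describe for u8 {
    fn describe(&self) -> &'static str { "byte fast path" }
}

fn main() {
    assert_eq!(1u32.describe(), "generic slow path");
    assert_eq!(1u8.describe(), "byte fast path");
}
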
1 change: 1 addition & 0 deletions src/libstd/lib.rs
@@ -302,6 +302,7 @@
#![feature(sip_hash_13)]
#![feature(slice_bytes)]
#![feature(slice_concat_ext)]
#![feature(slice_internals)]
#![feature(slice_patterns)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
2 changes: 1 addition & 1 deletion src/libstd/sys/redox/memchr.rs
@@ -11,4 +11,4 @@
// Original implementation taken from rust-memchr
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};
2 changes: 1 addition & 1 deletion src/libstd/sys/unix/memchr.rs
@@ -50,7 +50,7 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {

    #[cfg(not(target_os = "linux"))]
    fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
-        ::sys_common::memchr::fallback::memrchr(needle, haystack)
+        ::core::slice::memchr::memrchr(needle, haystack)
    }

    memrchr_specific(needle, haystack)
2 changes: 1 addition & 1 deletion src/libstd/sys/wasm/memchr.rs
@@ -8,4 +8,4 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};
2 changes: 1 addition & 1 deletion src/libstd/sys/windows/memchr.rs
@@ -12,4 +12,4 @@
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

// Fallback memchr is fastest on windows
-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};
(2 of the 9 changed files are not shown)
