From 57ff36f6cbd8e44349d352e9b9de1a87f9f762e9 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Mon, 2 Sep 2024 12:00:56 +0200 Subject: [PATCH] simd: reduce pub functions visibility This tweaks all SIMD `pub` functions, moving them to `pub(crate)` instead. --- src/simd/avx2.rs | 4 ++-- src/simd/mod.rs | 22 +++++++++++----------- src/simd/neon.rs | 6 +++--- src/simd/runtime.rs | 6 +++--- src/simd/sse42.rs | 4 ++-- src/simd/swar.rs | 6 +++--- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/simd/avx2.rs b/src/simd/avx2.rs index 6a7edc1..05a3681 100644 --- a/src/simd/avx2.rs +++ b/src/simd/avx2.rs @@ -2,7 +2,7 @@ use crate::iter::Bytes; #[inline] #[target_feature(enable = "avx2", enable = "sse4.2")] -pub unsafe fn match_uri_vectored(bytes: &mut Bytes) { +pub(crate) unsafe fn match_uri_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 32 { let advance = match_url_char_32_avx(bytes.as_ref()); bytes.advance(advance); @@ -57,7 +57,7 @@ unsafe fn match_url_char_32_avx(buf: &[u8]) -> usize { } #[target_feature(enable = "avx2", enable = "sse4.2")] -pub unsafe fn match_header_value_vectored(bytes: &mut Bytes) { +pub(crate) unsafe fn match_header_value_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 32 { let advance = match_header_value_char_32_avx(bytes.as_ref()); bytes.advance(advance); diff --git a/src/simd/mod.rs b/src/simd/mod.rs index 63464b4..399caad 100644 --- a/src/simd/mod.rs +++ b/src/simd/mod.rs @@ -11,7 +11,7 @@ mod swar; ) ), )))] -pub use self::swar::*; +pub(crate) use self::swar::*; #[cfg(all( httparse_simd, @@ -59,7 +59,7 @@ mod runtime; target_arch = "x86_64", ), ))] -pub use self::runtime::*; +pub(crate) use self::runtime::*; #[cfg(all( httparse_simd, @@ -72,18 +72,18 @@ pub use self::runtime::*; ))] mod sse42_compile_time { #[inline(always)] - pub fn match_header_name_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_header_name_vectored(b: &mut crate::iter::Bytes<'_>) { 
super::swar::match_header_name_vectored(b); } #[inline(always)] - pub fn match_uri_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_uri_vectored(b: &mut crate::iter::Bytes<'_>) { // SAFETY: calls are guarded by a compile time feature check unsafe { crate::simd::sse42::match_uri_vectored(b) } } #[inline(always)] - pub fn match_header_value_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_header_value_vectored(b: &mut crate::iter::Bytes<'_>) { // SAFETY: calls are guarded by a compile time feature check unsafe { crate::simd::sse42::match_header_value_vectored(b) } } @@ -98,7 +98,7 @@ mod sse42_compile_time { target_arch = "x86_64", ), ))] -pub use self::sse42_compile_time::*; +pub(crate) use self::sse42_compile_time::*; #[cfg(all( httparse_simd, @@ -110,18 +110,18 @@ pub use self::sse42_compile_time::*; ))] mod avx2_compile_time { #[inline(always)] - pub fn match_header_name_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_header_name_vectored(b: &mut crate::iter::Bytes<'_>) { super::swar::match_header_name_vectored(b); } #[inline(always)] - pub fn match_uri_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_uri_vectored(b: &mut crate::iter::Bytes<'_>) { // SAFETY: calls are guarded by a compile time feature check unsafe { crate::simd::avx2::match_uri_vectored(b) } } #[inline(always)] - pub fn match_header_value_vectored(b: &mut crate::iter::Bytes<'_>) { + pub(crate) fn match_header_value_vectored(b: &mut crate::iter::Bytes<'_>) { // SAFETY: calls are guarded by a compile time feature check unsafe { crate::simd::avx2::match_header_value_vectored(b) } } @@ -135,7 +135,7 @@ mod avx2_compile_time { target_arch = "x86_64", ), ))] -pub use self::avx2_compile_time::*; +pub(crate) use self::avx2_compile_time::*; #[cfg(all( httparse_simd, @@ -149,4 +149,4 @@ mod neon; target_arch = "aarch64", httparse_simd_neon_intrinsics, ))] -pub use self::neon::*; +pub(crate) use self::neon::*; diff --git a/src/simd/neon.rs 
b/src/simd/neon.rs index c6b86a8..acbba60 100644 --- a/src/simd/neon.rs +++ b/src/simd/neon.rs @@ -2,7 +2,7 @@ use crate::iter::Bytes; use core::arch::aarch64::*; #[inline] -pub fn match_header_name_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_name_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 16 { // SAFETY: ensured that there are at least 16 bytes remaining unsafe { @@ -18,7 +18,7 @@ pub fn match_header_name_vectored(bytes: &mut Bytes) { } #[inline] -pub fn match_header_value_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_value_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 16 { // SAFETY: ensured that there are at least 16 bytes remaining unsafe { @@ -34,7 +34,7 @@ pub fn match_header_value_vectored(bytes: &mut Bytes) { } #[inline] -pub fn match_uri_vectored(bytes: &mut Bytes) { +pub(crate) fn match_uri_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 16 { // SAFETY: ensured that there are at least 16 bytes remaining unsafe { diff --git a/src/simd/runtime.rs b/src/simd/runtime.rs index c523a92..01cd6ea 100644 --- a/src/simd/runtime.rs +++ b/src/simd/runtime.rs @@ -30,11 +30,11 @@ fn get_runtime_feature() -> u8 { feature } -pub fn match_header_name_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_name_vectored(bytes: &mut Bytes) { super::swar::match_header_name_vectored(bytes); } -pub fn match_uri_vectored(bytes: &mut Bytes) { +pub(crate) fn match_uri_vectored(bytes: &mut Bytes) { // SAFETY: calls are guarded by a feature check unsafe { match get_runtime_feature() { @@ -45,7 +45,7 @@ pub fn match_uri_vectored(bytes: &mut Bytes) { } } -pub fn match_header_value_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_value_vectored(bytes: &mut Bytes) { // SAFETY: calls are guarded by a feature check unsafe { match get_runtime_feature() { diff --git a/src/simd/sse42.rs b/src/simd/sse42.rs index d6fbf02..7b97319 100644 --- a/src/simd/sse42.rs +++ b/src/simd/sse42.rs @@ -1,7 +1,7 @@ use 
crate::iter::Bytes; #[target_feature(enable = "sse4.2")] -pub unsafe fn match_uri_vectored(bytes: &mut Bytes) { +pub(crate) unsafe fn match_uri_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 16 { let advance = match_url_char_16_sse(bytes.as_ref()); bytes.advance(advance); @@ -62,7 +62,7 @@ unsafe fn match_url_char_16_sse(buf: &[u8]) -> usize { } #[target_feature(enable = "sse4.2")] -pub unsafe fn match_header_value_vectored(bytes: &mut Bytes) { +pub(crate) unsafe fn match_header_value_vectored(bytes: &mut Bytes) { while bytes.as_ref().len() >= 16 { let advance = match_header_value_char_16_sse(bytes.as_ref()); bytes.advance(advance); diff --git a/src/simd/swar.rs b/src/simd/swar.rs index 857fc58..12e58ef 100644 --- a/src/simd/swar.rs +++ b/src/simd/swar.rs @@ -7,7 +7,7 @@ const BLOCK_SIZE: usize = core::mem::size_of::<usize>(); type ByteBlock = [u8; BLOCK_SIZE]; #[inline] -pub fn match_uri_vectored(bytes: &mut Bytes) { +pub(crate) fn match_uri_vectored(bytes: &mut Bytes) { loop { if let Some(bytes8) = bytes.peek_n::<ByteBlock>(BLOCK_SIZE) { let n = match_uri_char_8_swar(bytes8); @@ -35,7 +35,7 @@ pub fn match_uri_vectored(bytes: &mut Bytes) { } #[inline] -pub fn match_header_value_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_value_vectored(bytes: &mut Bytes) { loop { if let Some(bytes8) = bytes.peek_n::<ByteBlock>(BLOCK_SIZE) { let n = match_header_value_char_8_swar(bytes8); @@ -63,7 +63,7 @@ pub fn match_header_value_vectored(bytes: &mut Bytes) { } #[inline] -pub fn match_header_name_vectored(bytes: &mut Bytes) { +pub(crate) fn match_header_name_vectored(bytes: &mut Bytes) { while let Some(block) = bytes.peek_n::<ByteBlock>(BLOCK_SIZE) { let n = match_block(is_header_name_token, block); // SAFETY: using peek_n to retrieve the bytes ensures that there are at least n more bytes