diff --git a/libafl/src/observers/map.rs b/libafl/src/observers/map.rs
index 488ee58a32..64a15b9a10 100644
--- a/libafl/src/observers/map.rs
+++ b/libafl/src/observers/map.rs
@@ -9,7 +9,8 @@ use core::{
     hash::{BuildHasher, Hasher},
     iter::Flatten,
     marker::PhantomData,
-    slice::{from_raw_parts, Iter, IterMut},
+    mem::size_of,
+    slice::{self, Iter, IterMut},
 };
 
 use ahash::RandomState;
@@ -72,9 +73,9 @@ fn init_count_class_16() {
 fn hash_slice<T>(slice: &[T]) -> u64 {
     let mut hasher = RandomState::with_seeds(0, 0, 0, 0).build_hasher();
     let ptr = slice.as_ptr() as *const u8;
-    let map_size = slice.len() / core::mem::size_of::<T>();
+    let map_size = slice.len() / size_of::<T>();
     unsafe {
-        hasher.write(from_raw_parts(ptr, map_size));
+        hasher.write(slice::from_raw_parts(ptr, map_size));
     }
     hasher.finish()
 }
@@ -1234,7 +1235,23 @@ where
         exit_kind: &ExitKind,
     ) -> Result<(), Error> {
         let map = self.as_mut_slice();
-        let len = map.len();
+        let mut len = map.len();
+        let align_offset = map.as_ptr().align_offset(size_of::<u16>());
+
+        // if len == 1, the next branch will already do this lookup
+        if len > 1 && align_offset != 0 {
+            debug_assert_eq!(
+                align_offset, 1,
+                "Aligning u8 to u16 should always be offset of 1?"
+            );
+            unsafe {
+                *map.get_unchecked_mut(0) =
+                    *COUNT_CLASS_LOOKUP.get_unchecked(*map.get_unchecked(0) as usize);
+            }
+            len -= 1;
+        }
+
+        // Fix the last element
         if (len & 1) != 0 {
             unsafe {
                 *map.get_unchecked_mut(len - 1) =
@@ -1243,13 +1260,17 @@ where
         }
 
         let cnt = len / 2;
-        let map16 = unsafe { core::slice::from_raw_parts_mut(map.as_mut_ptr() as *mut u16, cnt) };
+
+        let map16 = unsafe {
+            slice::from_raw_parts_mut(map.as_mut_ptr().add(align_offset) as *mut u16, cnt)
+        };
         // 2022-07: Adding `enumerate` here increases execution speed/register allocation on x86_64.
         for (_i, item) in map16[0..cnt].iter_mut().enumerate() {
             unsafe {
                 *item = *COUNT_CLASS_LOOKUP_16.get_unchecked(*item as usize);
             }
         }
+
         self.base.post_exec(state, input, exit_kind)
     }
 }
@@ -1776,9 +1797,9 @@ where
         for map in &self.maps {
             let slice = map.as_slice();
             let ptr = slice.as_ptr() as *const u8;
-            let map_size = slice.len() / core::mem::size_of::<T>();
+            let map_size = slice.len() / size_of::<T>();
             unsafe {
-                hasher.write(from_raw_parts(ptr, map_size));
+                hasher.write(slice::from_raw_parts(ptr, map_size));
             }
         }
         hasher.finish()
diff --git a/libafl_bolts/src/staterestore.rs b/libafl_bolts/src/staterestore.rs
index 25e34ab08a..bbebe44dff 100644
--- a/libafl_bolts/src/staterestore.rs
+++ b/libafl_bolts/src/staterestore.rs
@@ -214,6 +214,11 @@ where
     fn content_mut(&mut self) -> &mut StateShMemContent {
         let ptr = self.shmem.as_slice().as_ptr();
+        debug_assert_eq!(
+            ptr.align_offset(size_of::<StateShMemContent>()),
+            0,
+            "Beginning of the page is not aligned at {ptr:?}!"
+        );
         #[allow(clippy::cast_ptr_alignment)] // Beginning of the page will always be aligned
         unsafe {
             &mut *(ptr as *mut StateShMemContent
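
For reviewers, here is a minimal standalone sketch of the alignment-aware classification pattern the `map.rs` hunks introduce. Nothing below is LibAFL code: `classify` stands in for the hitcounts `post_exec`, `LOOKUP_U8` approximates `COUNT_CLASS_LOOKUP` with AFL-style buckets, and the aligned interior is classified byte-wise here, whereas the real code indexes a precomputed 65536-entry `COUNT_CLASS_LOOKUP_16` table. Note that the tail fix-up in this sketch indexes from the end of the whole map, so it stays on the last byte even after the `u16` window has been shifted by one.

```rust
use core::mem::size_of;
use core::slice;

/// Stand-in for `COUNT_CLASS_LOOKUP`: AFL-style hit-count buckets.
const LOOKUP_U8: [u8; 256] = {
    let mut table = [0u8; 256];
    let mut i = 0;
    while i < 256 {
        table[i] = match i {
            0 => 0,
            1 => 1,
            2 => 2,
            3 => 4,
            4..=7 => 8,
            8..=15 => 16,
            16..=31 => 32,
            32..=127 => 64,
            _ => 128,
        };
        i += 1;
    }
    table
};

/// Classify every byte of `map`, reading the aligned interior as `u16`.
fn classify(map: &mut [u8]) {
    if map.is_empty() {
        return;
    }
    let mut len = map.len();
    let align_offset = map.as_ptr().align_offset(size_of::<u16>());

    // Unaligned start: classify the first byte alone and shift the u16
    // window right by one (for u8 -> u16 the offset is either 0 or 1).
    if len > 1 && align_offset != 0 {
        map[0] = LOOKUP_U8[map[0] as usize];
        len -= 1;
    }

    // Odd number of bytes left in the window: classify the last byte
    // of the map alone.
    if (len & 1) != 0 {
        let last = map.len() - 1;
        map[last] = LOOKUP_U8[map[last] as usize];
    }

    // The window is now u16-aligned and even-sized. The real code maps
    // each u16 through the precomputed 65536-entry table; splitting the
    // native-order bytes out again keeps this sketch table-free.
    let cnt = len / 2;
    let map16 = unsafe {
        slice::from_raw_parts_mut(map.as_mut_ptr().add(align_offset).cast::<u16>(), cnt)
    };
    for item in map16 {
        let [a, b] = item.to_ne_bytes();
        *item = u16::from_ne_bytes([LOOKUP_U8[a as usize], LOOKUP_U8[b as usize]]);
    }
}

fn main() {
    let mut buf = vec![3u8; 17];
    // `buf[1..]` usually starts at an odd address on common allocators,
    // exercising both fix-up paths; `classify` is correct either way.
    classify(&mut buf[1..]);
    assert!(buf[1..].iter().all(|&b| b == 4));
    println!("classified: {:?}", &buf[..5]);
}
```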
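
The `staterestore.rs` hunk only adds a debug assertion in front of the existing cast. Below is a small sketch of the same check under stated assumptions: `Header` and `Page` are hypothetical stand-ins, not LibAFL's `StateShMemContent` or its shmem mapping, and the comments note why `align_offset(size_of::<T>())` works here even though `align_of` is the usual argument.

```rust
use core::mem::{align_of, size_of};

/// Hypothetical stand-in for the shared-memory header; the real
/// `StateShMemContent` layout lives in libafl_bolts/src/staterestore.rs.
#[repr(C)]
struct Header {
    buf_len: usize,
    is_disk: bool,
}

/// Page-aligned backing buffer, standing in for the shmem mapping.
#[repr(C, align(4096))]
struct Page([u8; 4096]);

fn main() {
    let page = Page([0; 4096]);
    let ptr = page.0.as_ptr();

    // The patch asserts offset 0 before casting the base pointer.
    // `align_offset` panics if its argument is not a power of two, so
    // passing `size_of` (as the patch does) relies on the struct size
    // being a power of two; `align_of` is the canonical argument.
    debug_assert_eq!(ptr.align_offset(align_of::<Header>()), 0);
    debug_assert_eq!(ptr.align_offset(size_of::<Header>()), 0);

    #[allow(clippy::cast_ptr_alignment)] // page base is always aligned
    let header = unsafe { &*(ptr as *const Header) };
    assert_eq!(header.buf_len, 0);
    assert!(!header.is_disk);
}
```

On real shared memory the base pointer comes from an `mmap`-style call and is page-aligned, so the assertion should only fire if the mapping logic regresses.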