// | small1 | Chunk smaller than 8 bytes
// +--------+
fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
- let small0_size = if ptr as usize % 8 == 0 { 0 } else { 8 - ptr as usize % 8 };
- let small1_size = (len - small0_size as usize) % 8;
- let big_size = len - small0_size as usize - small1_size as usize;
+ let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 };
+ let small1_size = (len - small0_size) % 8;
+ let big_size = len - small0_size - small1_size;
(small0_size, big_size, small1_size)
}
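
To make the chunking arithmetic concrete, here is a worked example (hypothetical addresses, not part of the change): an address of 0x1003 gives 0x1003 % 8 == 3, so a 5-byte head reaches the boundary at 0x1008; of the remaining 15 bytes, 7 form the tail and the 8 in between are the aligned middle.

    // Worked example, assuming a region starting at address 0x1003:
    let ptr = 0x1003usize as *const u8;
    let (small0, big, small1) = region_as_aligned_chunks(ptr, 20);
    assert_eq!((small0, big, small1), (5, 8, 7));
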
// Inside copy_bytewise_to_userspace: each byte store is followed by a verw
// and a full fence so it completes before the next instruction executes.
asm!("
mov %ds, ({seg_sel})
verw ({seg_sel})
movb {val}, ({dst})
mfence
lfence
",
- val = in(reg_byte) *src.offset(off as isize),
- dst = in(reg) dst.offset(off as isize),
+ val = in(reg_byte) *src.add(off),
+ dst = in(reg) dst.add(off),
seg_sel = in(reg) &mut seg_sel,
options(nostack, att_syntax)
);
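
The trailing mfence; lfence pair forces each byte store to become globally visible before the loop continues. copy_quadwords, called further down for the aligned middle chunk, is not shown in this hunk; a minimal sketch of it, assuming the (src, dst, len) signature it is called with below and that it boils down to a rep movsq:

    use core::arch::asm;

    // Sketch only, not verbatim from the PR: copy len bytes (len must be a
    // multiple of 8) as quadwords using the string-move instruction.
    unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
        debug_assert_eq!(len % 8, 0);
        unsafe {
            asm!(
                "rep movsq (%rsi), (%rdi)",
                inout("rcx") len / 8 => _,
                inout("rdi") dst => _,
                inout("rsi") src => _,
                options(att_syntax, nostack, preserves_flags)
            );
        }
    }
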
assert!(is_enclave_range(src, len));
assert!(is_user_range(dst, len));
assert!(len < isize::MAX as usize);
- assert!(!(src as usize).overflowing_add(len).1);
- assert!(!(dst as usize).overflowing_add(len).1);
+ assert!(!src.addr().overflowing_add(len).1);
+ assert!(!dst.addr().overflowing_add(len).1);
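
The two overflowing_add asserts reject any src..src+len or dst..dst+len range that would wrap around the address space, and addr() expresses that check without a pointer-to-integer cast. If the .1 tuple access reads cryptic, an equivalent formulation (my sketch, not in the PR):

    // checked_add returns None exactly when overflowing_add reports overflow.
    assert!(src.addr().checked_add(len).is_some());
    assert!(dst.addr().checked_add(len).is_some());
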
if len < 8 {
// Can't align on 8 byte boundary: copy safely byte per byte
unsafe {
copy_bytewise_to_userspace(src, dst, len);
}
- } else if len % 8 == 0 && dst as usize % 8 == 0 {
+ } else if len % 8 == 0 && dst.is_aligned_to(8) {
// Copying 8-byte aligned quadwords: copy quad word per quad word
unsafe {
copy_quadwords(src, dst, len);
}
} else {
// Split the copy in three parts: the unaligned head and tail are copied byte
// per byte, the 8-byte aligned middle quadword per quadword
let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
unsafe {
// Copy small0
- copy_bytewise_to_userspace(src, dst, small0_size as _);
+ copy_bytewise_to_userspace(src, dst, small0_size);
// Copy big
- let big_src = src.offset(small0_size as _);
- let big_dst = dst.offset(small0_size as _);
- copy_quadwords(big_src as _, big_dst, big_size);
+ let big_src = src.add(small0_size);
+ let big_dst = dst.add(small0_size);
+ copy_quadwords(big_src, big_dst, big_size);
// Copy small1
- let small1_src = src.offset(big_size as isize + small0_size as isize);
- let small1_dst = dst.offset(big_size as isize + small0_size as isize);
- copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+ let small1_src = src.add(big_size + small0_size);
+ let small1_dst = dst.add(big_size + small0_size);
+ copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
}
}
}
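
The split is sound because of three invariants: the chunk sizes tile the region exactly, the middle chunk starts 8-byte aligned, and its size is a multiple of 8. Note also that region_as_aligned_chunks would underflow for a len smaller than the unaligned head, which is precisely what the len < 8 branch rules out. A hypothetical debug check (not part of the change) capturing this:

    // Hypothetical sanity check: verify the split invariants for len >= 8.
    fn check_split(ptr: *const u8, len: usize) {
        assert!(len >= 8);
        let (small0, big, small1) = region_as_aligned_chunks(ptr, len);
        assert_eq!(small0 + big + small1, len); // chunks tile the region
        assert!(small0 < 8 && small1 < 8); // head and tail stay sub-quadword
        assert_eq!(ptr.addr().wrapping_add(small0) % 8, 0); // middle is aligned
        assert_eq!(big % 8, 0); // middle is whole quadwords
    }
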