1 use core
::mem
::{self, MaybeUninit}
;
2 use core
::ops
::{Deref, DerefMut}
;
3 use core
::{ptr, slice}
;
6 use crate::{PartialAllocStrategy, PhysallocFlags}
;
/// An RAII guard of a physical memory allocation. Currently all physically allocated memory is
/// page-aligned and takes up at least 4k of space (on x86_64).
17 /// Construct a PhysBox from an address and a size.
20 /// This function is unsafe because when dropping, Self has to a valid allocation.
21 pub unsafe fn from_raw_parts(address
: usize, size
: usize) -> Self {
28 /// Retrieve the byte address in physical memory, of this allocation.
29 pub fn address(&self) -> usize {
33 /// Retrieve the size in bytes of the alloc.
34 pub fn size(&self) -> usize {
38 /// Allocate physical memory that must reside in 32-bit space.
39 pub fn new_in_32bit_space(size
: usize) -> Result
<Self> {
40 Self::new_with_flags(size
, PhysallocFlags
::SPACE_32
)
43 pub fn new_with_flags(size
: usize, flags
: PhysallocFlags
) -> Result
<Self> {
44 assert
!(!flags
.contains(PhysallocFlags
::PARTIAL_ALLOC
));
46 let address
= unsafe { crate::physalloc2(size, flags.bits())? }
;
53 /// "Partially" allocate physical memory, in the sense that the allocation may be smaller than
54 /// expected, but still with a minimum limit. This is particularly useful when the physical
55 /// memory space is fragmented, and a device supports scatter-gather I/O. In that case, the
56 /// driver can optimistically request e.g. 1 alloc of 1 MiB, with the minimum of 512 KiB. If
57 /// that first allocation only returns half the size, the driver can do another allocation
58 /// and then let the device use both buffers.
59 pub fn new_partial_allocation(size
: usize, flags
: PhysallocFlags
, strategy
: Option
<PartialAllocStrategy
>, mut min
: usize) -> Result
<Self> {
60 debug_assert
!(!(flags
.contains(PhysallocFlags
::PARTIAL_ALLOC
) && strategy
.is_none()));
62 let address
= unsafe { crate::physalloc3(size, flags.bits() | strategy.map(|s| s as usize).unwrap_or(0), &mut min)? }
;
69 pub fn new(size
: usize) -> Result
<Self> {
70 let address
= unsafe { crate::physalloc(size)? }
;
78 impl Drop
for PhysBox
{
80 let _
= unsafe { crate::physfree(self.address, self.size) }
;
84 pub struct Dma
<T
: ?Sized
> {
90 pub fn from_physbox_uninit(phys
: PhysBox
) -> Result
<Dma
<MaybeUninit
<T
>>> {
91 let virt
= unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? }
as *mut MaybeUninit
<T
>;
98 pub fn from_physbox_zeroed(phys
: PhysBox
) -> Result
<Dma
<MaybeUninit
<T
>>> {
99 let this
= Self::from_physbox_uninit(phys
)?
;
100 unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size) }
104 pub fn from_physbox(phys
: PhysBox
, value
: T
) -> Result
<Self> {
105 let this
= Self::from_physbox_uninit(phys
)?
;
108 ptr
::write(this
.virt
, MaybeUninit
::new(value
));
113 pub fn new(value
: T
) -> Result
<Self> {
114 let phys
= PhysBox
::new(mem
::size_of
::<T
>())?
;
115 Self::from_physbox(phys
, value
)
117 pub fn zeroed() -> Result
<Dma
<MaybeUninit
<T
>>> {
118 let phys
= PhysBox
::new(mem
::size_of
::<T
>())?
;
119 Self::from_physbox_zeroed(phys
)
123 impl<T
> Dma
<MaybeUninit
<T
>> {
124 pub unsafe fn assume_init(self) -> Dma
<T
> {
125 let &Dma { phys: PhysBox { address, size }
, virt
} = &self;
129 phys
: PhysBox { address, size }
,
130 virt
: virt
as *mut T
,
134 impl<T
: ?Sized
> Dma
<T
> {
135 pub fn physical(&self) -> usize {
138 pub fn size(&self) -> usize {
141 pub fn phys(&self) -> &PhysBox
{
147 pub fn from_physbox_uninit_unsized(phys
: PhysBox
, len
: usize) -> Result
<Dma
<[MaybeUninit
<T
>]>> {
148 let max_len
= phys
.size() / mem
::size_of
::<T
>();
149 assert
!(len
<= max_len
);
152 virt
: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut MaybeUninit<T>, len) }
as *mut [MaybeUninit
<T
>],
156 pub fn from_physbox_zeroed_unsized(phys
: PhysBox
, len
: usize) -> Result
<Dma
<[MaybeUninit
<T
>]>> {
157 let this
= Self::from_physbox_uninit_unsized(phys
, len
)?
;
158 unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size()) }
161 /// Creates a new DMA buffer with a size only known at runtime.
163 /// * `T` must be properly aligned.
164 /// * `T` must be valid as zeroed (i.e. no NonNull pointers).
165 pub unsafe fn zeroed_unsized(count
: usize) -> Result
<Self> {
166 let phys
= PhysBox
::new(mem
::size_of
::<T
>() * count
)?
;
167 Ok(Self::from_physbox_zeroed_unsized(phys
, count
)?
.assume_init())
170 impl<T
> Dma
<[MaybeUninit
<T
>]> {
171 pub unsafe fn assume_init(self) -> Dma
<[T
]> {
172 let &Dma { phys: PhysBox { address, size }
, virt
} = &self;
176 phys
: PhysBox { address, size }
,
177 virt
: virt
as *mut [T
],
182 impl<T
: ?Sized
> Deref
for Dma
<T
> {
184 fn deref(&self) -> &T
{
185 unsafe { &*self.virt }
189 impl<T
: ?Sized
> DerefMut
for Dma
<T
> {
190 fn deref_mut(&mut self) -> &mut T
{
191 unsafe { &mut *self.virt }
195 impl<T
: ?Sized
> Drop
for Dma
<T
> {
197 unsafe { ptr::drop_in_place(self.virt) }
198 let _
= unsafe { crate::physunmap(self.virt as *mut u8 as usize) }
;