about summary refs log tree commit diff
path: root/rendy-memory/src/mapping
diff options
context:
space:
mode:
Diffstat (limited to 'rendy-memory/src/mapping')
-rw-r--r-- rendy-memory/src/mapping/mod.rs   345
-rw-r--r-- rendy-memory/src/mapping/range.rs  85
-rw-r--r-- rendy-memory/src/mapping/write.rs  73
3 files changed, 503 insertions, 0 deletions
diff --git a/rendy-memory/src/mapping/mod.rs b/rendy-memory/src/mapping/mod.rs
new file mode 100644
index 0000000..63b2f34
--- /dev/null
+++ b/rendy-memory/src/mapping/mod.rs
@@ -0,0 +1,345 @@
+mod range;
+pub(crate) mod write;
+
+use {
+ crate::{memory::Memory, util::*},
+ gfx_hal::{device::Device as _, Backend},
+ std::{ops::Range, ptr::NonNull},
+};
+
+pub(crate) use self::range::*;
+use self::write::{Write, WriteCoherent, WriteFlush};
+
/// Type-level marker: the mapped memory is *not* host-coherent, so host
/// reads need `invalidate_mapped_memory_ranges` and host writes need
/// `flush_mapped_memory_ranges`.
#[derive(Clone, Copy, Debug)]
pub struct NonCoherent;

/// Type-level marker: the mapped memory is host-coherent, so no manual
/// flush/invalidate calls are required.
#[derive(Clone, Copy, Debug)]
pub struct Coherent;

/// Runtime coherency flag, used when coherency is not known statically.
/// The wrapped bool is `true` when the memory is host-coherent
/// (it is filled from `Memory::host_coherent()` in `MappedRange::from_raw`).
#[derive(Clone, Copy, Debug)]
pub struct MaybeCoherent(bool);
+
/// Represents range of the memory mapped to the host.
/// Provides methods for safer host access to the memory.
///
/// The `C` parameter carries the coherency of the mapping:
/// `Coherent`, `NonCoherent`, or the runtime-checked default `MaybeCoherent`.
#[derive(Debug)]
pub struct MappedRange<'a, B: Backend, C = MaybeCoherent> {
    /// Memory object that is mapped.
    memory: &'a Memory<B>,

    /// Pointer to range mapped memory.
    /// Points to the `mapping_range.start` offset from the memory origin.
    ptr: NonNull<u8>,

    /// Range of mapped memory, in memory object space.
    /// For non-coherent memory its bounds are multiples of
    /// `non_coherent_atom_size` (checked in `from_raw`).
    mapping_range: Range<u64>,

    /// Mapping range requested by caller.
    /// Must be subrange of `mapping_range`.
    requested_range: Range<u64>,

    /// Coherency marker
    coherent: C,
}
+
impl<'a, B> MappedRange<'a, B>
where
    B: Backend,
{
    // /// Map range of memory.
    // /// `range` is in memory object space.
    // ///
    // /// # Safety
    // ///
    // /// * Only one range for the given memory object can be mapped.
    // /// * Memory object must be not mapped.
    // /// * Memory object must be created with device specified.
    // pub unsafe fn new(
    //     memory: &'a Memory<B>,
    //     device: &B::Device,
    //     range: Range<u64>,
    // ) -> Result<Self, gfx_hal::device::MapError> {
    //     assert!(
    //         range.start < range.end,
    //         "Memory mapping region must have valid size"
    //     );
    //     assert!(
    //         fits_usize(range.end - range.start),
    //         "Range length must fit in usize"
    //     );
    //     assert!(memory.host_visible());

    //     let ptr = device.map_memory(memory.raw(), range.clone())?;
    //     assert!(
    //         (ptr as usize).wrapping_neg() >= (range.end - range.start) as usize,
    //         "Resulting pointer value + range length must fit in usize. Pointer: {:p}, range {:?}",
    //         ptr,
    //         range,
    //     );

    //     Ok(Self::from_raw(memory, NonNull::new_unchecked(ptr), range))
    // }

    /// Construct mapped range from raw mapping
    ///
    /// # Safety
    ///
    /// `memory` `range` must be mapped to host memory region pointer by `ptr`.
    /// `range` is in memory object space.
    /// `ptr` points to the `range.start` offset from memory origin.
    pub(crate) unsafe fn from_raw(
        memory: &'a Memory<B>,
        ptr: NonNull<u8>,
        mapping_range: Range<u64>,
        requested_range: Range<u64>,
    ) -> Self {
        debug_assert!(
            mapping_range.start < mapping_range.end,
            "Memory mapping region must have valid size"
        );

        debug_assert!(
            requested_range.start < requested_range.end,
            "Memory mapping region must have valid size"
        );

        if !memory.host_coherent() {
            // Non-coherent mappings are flushed/invalidated at atom
            // granularity, so the actual mapping must be atom-aligned while
            // the caller's request may be a tighter sub-range of it.
            debug_assert_eq!(mapping_range.start % memory.non_coherent_atom_size(), 0, "Bounds of non-coherent memory mapping ranges must be multiple of `Limits::non_coherent_atom_size`");
            debug_assert_eq!(mapping_range.end % memory.non_coherent_atom_size(), 0, "Bounds of non-coherent memory mapping ranges must be multiple of `Limits::non_coherent_atom_size`");
            debug_assert!(
                is_sub_range(mapping_range.clone(), requested_range.clone()),
                "`requested_range` must be sub-range of `mapping_range`",
            );
        } else {
            // Coherent memory needs no alignment, so both ranges coincide.
            debug_assert_eq!(mapping_range, requested_range);
        }

        MappedRange {
            ptr,
            mapping_range,
            requested_range,
            memory,
            coherent: MaybeCoherent(memory.host_coherent()),
        }
    }

    /// Get pointer to beginning of memory region.
    /// i.e. to `range().start` offset from memory origin.
    pub fn ptr(&self) -> NonNull<u8> {
        // `requested_range` is always contained in `mapping_range`
        // (checked in `from_raw`), so this never returns `None`.
        mapped_sub_range(
            self.ptr,
            self.mapping_range.clone(),
            self.requested_range.clone(),
        )
        .unwrap()
    }

    /// Get mapped range.
    pub fn range(&self) -> Range<u64> {
        self.requested_range.clone()
    }

    /// Fetch readable slice of sub-range to be read.
    /// Invalidating range if memory is not coherent.
    /// `range.end - range.start` must be multiple of `size_of::<T>()`.
    /// `mapping offset + range.start` must be multiple of `align_of::<T>()`.
    ///
    /// # Safety
    ///
    /// * Caller must ensure that device won't write to the memory region until the borrowing ends.
    /// * `T` Must be plain-old-data type compatible with data in mapped region.
    pub unsafe fn read<'b, T>(
        &'b mut self,
        device: &B::Device,
        range: Range<u64>,
    ) -> Result<&'b [T], gfx_hal::device::MapError>
    where
        'a: 'b,
        T: Copy,
    {
        debug_assert!(
            range.start < range.end,
            "Memory mapping region must have valid size"
        );
        debug_assert!(
            fits_usize(range.end - range.start),
            "Range length must fit in usize"
        );

        // Translate the caller-relative `range` into memory object space
        // (relative to `requested_range`).
        let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
            .ok_or(gfx_hal::device::MapError::OutOfBounds)?;

        let ptr =
            mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();

        // Length of the sub-range in bytes.
        let size = (sub_range.end - sub_range.start) as usize;

        if !self.coherent.0 {
            // Non-coherent memory: invalidate the atom-aligned enclosing
            // range so host reads observe the device's writes.
            let aligned_sub_range = align_range(sub_range, self.memory.non_coherent_atom_size());
            debug_assert!(is_sub_range(
                self.mapping_range.clone(),
                aligned_sub_range.clone()
            ));
            device.invalidate_mapped_memory_ranges(std::iter::once((
                self.memory.raw(),
                gfx_hal::memory::Segment {
                    offset: aligned_sub_range.start,
                    size: Some(aligned_sub_range.end - aligned_sub_range.start),
                },
            )))?;
        }

        let slice = mapped_slice::<T>(ptr, size);
        Ok(slice)
    }

    /// Fetch writer to the sub-region.
    /// For non-coherent memory the returned writer flushes the range when
    /// dropped; for coherent memory no flush is performed.
    ///
    /// # Safety
    ///
    /// * Caller must ensure that device won't write to or read from the memory region.
    pub unsafe fn write<'b, T: 'b>(
        &'b mut self,
        device: &'b B::Device,
        range: Range<u64>,
    ) -> Result<impl Write<T> + 'b, gfx_hal::device::MapError>
    where
        'a: 'b,
        T: Copy,
    {
        assert!(
            range.start < range.end,
            "Memory mapping region must have valid size"
        );
        assert!(
            fits_usize(range.end - range.start),
            "Range length must fit in usize"
        );

        // Translate the caller-relative `range` into memory object space
        // (relative to `requested_range`).
        let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
            .ok_or(gfx_hal::device::MapError::OutOfBounds)?;

        let ptr =
            mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();

        // Length of the sub-range in bytes.
        let size = (sub_range.end - sub_range.start) as usize;

        let slice = mapped_slice_mut::<T>(ptr, size);

        let memory = &self.memory;
        // For non-coherent memory, capture a deferred flush of the
        // atom-aligned range; `WriteFlush` runs it on drop.
        let flush = if !self.coherent.0 {
            let aligned_sub_range = align_range(sub_range, self.memory.non_coherent_atom_size());
            debug_assert!(is_sub_range(
                self.mapping_range.clone(),
                aligned_sub_range.clone()
            ));
            Some(move || {
                device
                    .flush_mapped_memory_ranges(std::iter::once((
                        memory.raw(),
                        gfx_hal::memory::Segment {
                            offset: aligned_sub_range.start,
                            size: Some(aligned_sub_range.end - aligned_sub_range.start),
                        },
                    )))
                    .expect("Should flush successfully");
            })
        } else {
            None
        };

        Ok(WriteFlush { slice, flush })
    }

    /// Convert into mapped range with statically known coherency.
    ///
    /// Returns `Ok` with a `Coherent`-marked range when the memory is
    /// host-coherent, otherwise `Err` with a `NonCoherent`-marked one.
    pub fn coherent(self) -> Result<MappedRange<'a, B, Coherent>, MappedRange<'a, B, NonCoherent>> {
        if self.coherent.0 {
            Ok(MappedRange {
                memory: self.memory,
                ptr: self.ptr,
                mapping_range: self.mapping_range,
                requested_range: self.requested_range,
                coherent: Coherent,
            })
        } else {
            Err(MappedRange {
                memory: self.memory,
                ptr: self.ptr,
                mapping_range: self.mapping_range,
                requested_range: self.requested_range,
                coherent: NonCoherent,
            })
        }
    }
}
+
+impl<'a, B> From<MappedRange<'a, B, Coherent>> for MappedRange<'a, B>
+where
+ B: Backend,
+{
+ fn from(range: MappedRange<'a, B, Coherent>) -> Self {
+ MappedRange {
+ memory: range.memory,
+ ptr: range.ptr,
+ mapping_range: range.mapping_range,
+ requested_range: range.requested_range,
+ coherent: MaybeCoherent(true),
+ }
+ }
+}
+
+impl<'a, B> From<MappedRange<'a, B, NonCoherent>> for MappedRange<'a, B>
+where
+ B: Backend,
+{
+ fn from(range: MappedRange<'a, B, NonCoherent>) -> Self {
+ MappedRange {
+ memory: range.memory,
+ ptr: range.ptr,
+ mapping_range: range.mapping_range,
+ requested_range: range.requested_range,
+ coherent: MaybeCoherent(false),
+ }
+ }
+}
+
impl<'a, B> MappedRange<'a, B, Coherent>
where
    B: Backend,
{
    /// Fetch writer to the sub-region.
    /// Unlike the `MaybeCoherent` version no `device` is needed: the
    /// returned `WriteCoherent` performs no flush.
    ///
    /// # Safety
    ///
    /// * Caller must ensure that device won't write to or read from the memory region.
    pub unsafe fn write<'b, U: 'b>(
        &'b mut self,
        range: Range<u64>,
    ) -> Result<impl Write<U> + 'b, gfx_hal::device::MapError>
    where
        U: Copy,
    {
        assert!(
            range.start < range.end,
            "Memory mapping region must have valid size"
        );
        assert!(
            fits_usize(range.end - range.start),
            "Range length must fit in usize"
        );

        // Translate the caller-relative `range` into memory object space
        // (relative to `requested_range`).
        let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
            .ok_or(gfx_hal::device::MapError::OutOfBounds)?;

        let ptr =
            mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();

        // Length of the sub-range in bytes.
        let size = (sub_range.end - sub_range.start) as usize;

        let slice = mapped_slice_mut::<U>(ptr, size);

        Ok(WriteCoherent { slice })
    }
}
diff --git a/rendy-memory/src/mapping/range.rs b/rendy-memory/src/mapping/range.rs
new file mode 100644
index 0000000..f4c49be
--- /dev/null
+++ b/rendy-memory/src/mapping/range.rs
@@ -0,0 +1,85 @@
+use {
+ crate::util::fits_usize,
+ std::{
+ mem::{align_of, size_of},
+ ops::Range,
+ ptr::NonNull,
+ slice::{from_raw_parts, from_raw_parts_mut},
+ },
+};
+
+/// Get sub-range of memory mapping.
+/// `range` and `fitting` are in memory object space.
+/// `ptr` points to the `range.start` offset from memory origin.
+/// returns pointer to `fitting.start` offset from memory origin
+/// if `fitting` is contained in `range`.
/// Get sub-range of memory mapping.
/// `range` and `fitting` are in memory object space.
/// `ptr` points to the `range.start` offset from memory origin.
/// returns pointer to `fitting.start` offset from memory origin
/// if `fitting` is contained in `range`.
pub(crate) fn mapped_sub_range(
    ptr: NonNull<u8>,
    range: Range<u64>,
    fitting: Range<u64>,
) -> Option<NonNull<u8>> {
    assert!(
        range.start < range.end,
        "Memory mapping region must have valid size"
    );
    assert!(
        fitting.start < fitting.end,
        "Memory mapping region must have valid size"
    );
    assert!(fits_usize(range.end - range.start));
    // Guard: `ptr + range length` must fit in `usize`, so the pointer
    // arithmetic below cannot wrap. (No separate check for `fitting`'s
    // length: once containment is verified, it is bounded by `range`'s.)
    assert!(usize::max_value() - (range.end - range.start) as usize >= ptr.as_ptr() as usize);

    if fitting.start < range.start || fitting.end > range.end {
        None
    } else {
        Some(unsafe {
            // for x > 0 and y >= 0: x + y > 0. No overflow due to checks above.
            NonNull::new_unchecked(
                (ptr.as_ptr() as usize + (fitting.start - range.start) as usize) as *mut u8,
            )
        })
    }
}
+
+/// # Safety
+///
+/// User must ensure that:
+/// * this function won't create aliasing slices.
+/// * returned slice doesn't outlive mapping.
+/// * `T` Must be plain-old-data type compatible with data in mapped region.
/// Interpret `size` bytes starting at `ptr` as a mutable slice of `T`.
///
/// # Safety
///
/// User must ensure that:
/// * this function won't create aliasing slices.
/// * returned slice doesn't outlive mapping.
/// * `T` Must be plain-old-data type compatible with data in mapped region.
pub(crate) unsafe fn mapped_slice_mut<'a, T>(ptr: NonNull<u8>, size: usize) -> &'a mut [T] {
    assert_eq!(
        size % size_of::<T>(),
        0,
        "Range length must be multiple of element size"
    );
    let offset = ptr.as_ptr() as usize;
    assert_eq!(
        offset % align_of::<T>(),
        0,
        "Range offset must be multiple of element alignment"
    );
    assert!(usize::max_value() - size >= ptr.as_ptr() as usize);
    // `size` is a byte length (callers pass `sub_range.end - sub_range.start`),
    // while `from_raw_parts_mut` takes the length in *elements of `T`*.
    // Passing `size` directly would over-extend the slice by a factor of
    // `size_of::<T>()`; convert it (divisibility asserted above).
    from_raw_parts_mut(ptr.as_ptr() as *mut T, size / size_of::<T>())
}
+
+/// # Safety
+///
+/// User must ensure that:
+/// * returned slice doesn't outlive mapping.
+/// * `T` Must be plain-old-data type compatible with data in mapped region.
/// Interpret `size` bytes starting at `ptr` as a shared slice of `T`.
///
/// # Safety
///
/// User must ensure that:
/// * returned slice doesn't outlive mapping.
/// * `T` Must be plain-old-data type compatible with data in mapped region.
pub(crate) unsafe fn mapped_slice<'a, T>(ptr: NonNull<u8>, size: usize) -> &'a [T] {
    assert_eq!(
        size % size_of::<T>(),
        0,
        "Range length must be multiple of element size"
    );
    let offset = ptr.as_ptr() as usize;
    assert_eq!(
        offset % align_of::<T>(),
        0,
        "Range offset must be multiple of element alignment"
    );
    assert!(usize::max_value() - size >= ptr.as_ptr() as usize);
    // `size` is a byte length (callers pass `sub_range.end - sub_range.start`),
    // while `from_raw_parts` takes the length in *elements of `T`*.
    // Passing `size` directly would over-extend the slice by a factor of
    // `size_of::<T>()`; convert it (divisibility asserted above).
    from_raw_parts(ptr.as_ptr() as *const T, size / size_of::<T>())
}
diff --git a/rendy-memory/src/mapping/write.rs b/rendy-memory/src/mapping/write.rs
new file mode 100644
index 0000000..d067a61
--- /dev/null
+++ b/rendy-memory/src/mapping/write.rs
@@ -0,0 +1,73 @@
+use std::ptr::copy_nonoverlapping;
+
/// Trait for memory region suitable for host writes.
pub trait Write<T: Copy> {
    /// Get mutable slice of `T` bound to mapped range.
    ///
    /// # Safety
    ///
    /// * Returned slice should not be read.
    unsafe fn slice(&mut self) -> &mut [T];

    /// Write data into mapped memory sub-region.
    ///
    /// # Panic
    ///
    /// Panics if `data.len()` is greater than this sub-region len.
    fn write(&mut self, data: &[T]) {
        // SAFETY: the slice is only written through, never read.
        let target = unsafe { self.slice() };
        assert!(data.len() <= target.len());
        // SAFETY: `data` fits into `target` (asserted above) and the two
        // regions come from distinct borrows.
        unsafe {
            copy_nonoverlapping(data.as_ptr(), target.as_mut_ptr(), data.len());
        }
    }
}
+
+#[derive(Debug)]
+pub(super) struct WriteFlush<'a, T, F: FnOnce() + 'a> {
+ pub(super) slice: &'a mut [T],
+ pub(super) flush: Option<F>,
+}
+
+impl<'a, T, F> Drop for WriteFlush<'a, T, F>
+where
+ T: 'a,
+ F: FnOnce() + 'a,
+{
+ fn drop(&mut self) {
+ if let Some(f) = self.flush.take() {
+ f();
+ }
+ }
+}
+
+impl<'a, T, F> Write<T> for WriteFlush<'a, T, F>
+where
+ T: Copy + 'a,
+ F: FnOnce() + 'a,
+{
+ /// # Safety
+ ///
+ /// [See doc comment for trait method](trait.Write#method.slice)
+ unsafe fn slice(&mut self) -> &mut [T] {
+ self.slice
+ }
+}
+
+#[warn(dead_code)]
+#[derive(Debug)]
+pub(super) struct WriteCoherent<'a, T> {
+ pub(super) slice: &'a mut [T],
+}
+
+impl<'a, T> Write<T> for WriteCoherent<'a, T>
+where
+ T: Copy + 'a,
+{
+ /// # Safety
+ ///
+ /// [See doc comment for trait method](trait.Write#method.slice)
+ unsafe fn slice(&mut self) -> &mut [T] {
+ self.slice
+ }
+}