From b86e97f67a07368877bd18501aebcbe80cf93118 Mon Sep 17 00:00:00 2001 From: tcmal Date: Sun, 25 Aug 2024 17:44:24 +0100 Subject: feat(skeleton): add memory pools added stock memory pools behind a feature gate refactored buffers to use them and to have better APIs. moved some util code out of builders::pipeline updated stockton-render for the changes deactivation is a WIP breaks UI drawing, fix WIP --- stockton-skeleton/src/buffers/dedicated_image.rs | 134 --------- stockton-skeleton/src/buffers/draw.rs | 54 ++++ stockton-skeleton/src/buffers/draw_buffers.rs | 43 --- stockton-skeleton/src/buffers/image.rs | 342 +++++++++++++++++++++++ stockton-skeleton/src/buffers/mod.rs | 67 +---- stockton-skeleton/src/buffers/staged.rs | 177 ++++++++---- stockton-skeleton/src/buffers/staging.rs | 103 +++++++ 7 files changed, 622 insertions(+), 298 deletions(-) delete mode 100644 stockton-skeleton/src/buffers/dedicated_image.rs create mode 100644 stockton-skeleton/src/buffers/draw.rs delete mode 100644 stockton-skeleton/src/buffers/draw_buffers.rs create mode 100644 stockton-skeleton/src/buffers/image.rs create mode 100644 stockton-skeleton/src/buffers/staging.rs (limited to 'stockton-skeleton/src/buffers') diff --git a/stockton-skeleton/src/buffers/dedicated_image.rs b/stockton-skeleton/src/buffers/dedicated_image.rs deleted file mode 100644 index bf49a38..0000000 --- a/stockton-skeleton/src/buffers/dedicated_image.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! A dedicated image. Used for depth buffers. - -use crate::texture::PIXEL_SIZE; -use crate::types::*; - -use std::mem::ManuallyDrop; - -use anyhow::{Context, Result}; -use hal::{ - format::{Format, Swizzle}, - image::{SubresourceRange, Usage, Usage as ImgUsage, ViewKind}, - memory, - memory::Properties, - MemoryTypeId, -}; -use thiserror::Error; - -/// Holds an image that's loaded into GPU memory dedicated only to that image, bypassing the memory allocator. 
-pub struct DedicatedLoadedImage { - /// The GPU Image handle - image: ManuallyDrop, - - /// The full view of the image - pub image_view: ManuallyDrop, - - /// The memory backing the image - memory: ManuallyDrop, -} - -#[derive(Debug, Error)] -pub enum ImageLoadError { - #[error("No suitable memory type for image memory")] - NoMemoryTypes, -} - -impl DedicatedLoadedImage { - pub fn new( - device: &mut DeviceT, - adapter: &Adapter, - format: Format, - usage: Usage, - resources: SubresourceRange, - width: usize, - height: usize, - ) -> Result { - let (memory, image_ref) = { - // Round up the size to align properly - let initial_row_size = PIXEL_SIZE * width; - let limits = adapter.physical_device.properties().limits; - let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1; - - let row_size = - ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize; - debug_assert!(row_size as usize >= initial_row_size); - - // Make the image - let mut image_ref = unsafe { - use hal::image::{Kind, Tiling, ViewCapabilities}; - - device.create_image( - Kind::D2(width as u32, height as u32, 1, 1), - 1, - format, - Tiling::Optimal, - usage, - memory::SparseFlags::empty(), - ViewCapabilities::empty(), - ) - } - .context("Error creating image")?; - - // Allocate memory - let memory = unsafe { - let requirements = device.get_image_requirements(&image_ref); - - let memory_type_id = adapter - .physical_device - .memory_properties() - .memory_types - .iter() - .enumerate() - .find(|&(id, memory_type)| { - requirements.type_mask & (1 << id) != 0 - && memory_type.properties.contains(Properties::DEVICE_LOCAL) - }) - .map(|(id, _)| MemoryTypeId(id)) - .ok_or(ImageLoadError::NoMemoryTypes)?; - - let memory = device - .allocate_memory(memory_type_id, requirements.size) - .context("Error allocating memory for image")?; - - device - .bind_image_memory(&memory, 0, &mut image_ref) - .context("Error binding memory to image")?; - - memory - }; - - (memory, image_ref) - }; - - // Create ImageView and sampler - let image_view = unsafe { - device.create_image_view( - &image_ref, - ViewKind::D2, - format, - Swizzle::NO, - ImgUsage::DEPTH_STENCIL_ATTACHMENT, - resources, - ) - } - .context("Error creating image view")?; - - Ok(DedicatedLoadedImage { - image: ManuallyDrop::new(image_ref), - image_view: ManuallyDrop::new(image_view), - memory: ManuallyDrop::new(memory), - }) - } - - /// Properly frees/destroys all the objects in this struct - /// Dropping without doing this is a bad idea - pub fn deactivate(self, device: &mut DeviceT) { - unsafe { - use core::ptr::read; - - device.destroy_image_view(ManuallyDrop::into_inner(read(&self.image_view))); - device.destroy_image(ManuallyDrop::into_inner(read(&self.image))); - device.free_memory(ManuallyDrop::into_inner(read(&self.memory))); - } - } -} diff --git a/stockton-skeleton/src/buffers/draw.rs b/stockton-skeleton/src/buffers/draw.rs new file mode 100644 index 0000000..cd571e3 --- /dev/null +++ b/stockton-skeleton/src/buffers/draw.rs @@ -0,0 +1,54 @@ +//! A vertex and index buffer set for drawing + +use super::staged::StagedBuffer; +use crate::{ + context::RenderingContext, + mem::{MappableBlock, MemoryPool}, +}; + +use anyhow::{Context, Result}; +use hal::buffer::Usage; +use std::mem::ManuallyDrop; + +/// Initial size of vertex buffer. TODO: Way of overriding this +pub const INITIAL_VERT_SIZE: u64 = 3 * 3000; + +/// Initial size of index buffer. 
TODO: Way of overriding this +pub const INITIAL_INDEX_SIZE: u64 = 3000; + +/// A vertex and index buffer set for drawing +pub struct DrawBuffers<'a, T: Sized, P: MemoryPool, SP: MemoryPool> { + pub vertex_buffer: ManuallyDrop>, + pub index_buffer: ManuallyDrop>, +} + +impl<'a, T, P, SP> DrawBuffers<'a, T, P, SP> +where + P: MemoryPool, + SP: MemoryPool, + SP::Block: MappableBlock, +{ + /// Create a new set of drawbuffers given a render context. + /// This will allocate memory from `P` and `SP`, and currently has a fixed size (WIP). + pub fn from_context(context: &mut RenderingContext) -> Result { + let vert = StagedBuffer::from_context(context, Usage::VERTEX, INITIAL_VERT_SIZE) + .context("Error creating vertex buffer")?; + let index = StagedBuffer::from_context(context, Usage::INDEX, INITIAL_INDEX_SIZE) + .context("Error creating index buffer")?; + + Ok(DrawBuffers { + vertex_buffer: ManuallyDrop::new(vert), + index_buffer: ManuallyDrop::new(index), + }) + } + + /// Destroy all Vulkan objects. Should be called before dropping. + pub fn deactivate(self, context: &mut RenderingContext) { + unsafe { + use core::ptr::read; + + ManuallyDrop::into_inner(read(&self.vertex_buffer)).deactivate(context); + ManuallyDrop::into_inner(read(&self.index_buffer)).deactivate(context); + } + } +} diff --git a/stockton-skeleton/src/buffers/draw_buffers.rs b/stockton-skeleton/src/buffers/draw_buffers.rs deleted file mode 100644 index 5baec92..0000000 --- a/stockton-skeleton/src/buffers/draw_buffers.rs +++ /dev/null @@ -1,43 +0,0 @@ -//! A vertex and index buffer set for drawing - -use super::StagedBuffer; -use crate::types::*; - -use anyhow::{Context, Result}; -use hal::buffer::Usage; -use std::mem::ManuallyDrop; - -/// Initial size of vertex buffer. TODO: Way of overriding this -pub const INITIAL_VERT_SIZE: u64 = 3 * 3000; - -/// Initial size of index buffer. TODO: Way of overriding this -pub const INITIAL_INDEX_SIZE: u64 = 3000; - -/// The buffers used for drawing, ie index and vertex buffer -pub struct DrawBuffers<'a, T: Sized> { - pub vertex_buffer: ManuallyDrop>, - pub index_buffer: ManuallyDrop>, -} - -impl<'a, T> DrawBuffers<'a, T> { - pub fn new(device: &mut DeviceT, adapter: &Adapter) -> Result> { - let vert = StagedBuffer::new(device, adapter, Usage::VERTEX, INITIAL_VERT_SIZE) - .context("Error creating vertex buffer")?; - let index = StagedBuffer::new(device, adapter, Usage::INDEX, INITIAL_INDEX_SIZE) - .context("Error creating index buffer")?; - - Ok(DrawBuffers { - vertex_buffer: ManuallyDrop::new(vert), - index_buffer: ManuallyDrop::new(index), - }) - } - - pub fn deactivate(self, device: &mut DeviceT) { - unsafe { - use core::ptr::read; - - ManuallyDrop::into_inner(read(&self.vertex_buffer)).deactivate(device); - ManuallyDrop::into_inner(read(&self.index_buffer)).deactivate(device); - } - } -} diff --git a/stockton-skeleton/src/buffers/image.rs b/stockton-skeleton/src/buffers/image.rs new file mode 100644 index 0000000..34a0a37 --- /dev/null +++ b/stockton-skeleton/src/buffers/image.rs @@ -0,0 +1,342 @@ +//! An image with memory bound to it and an image view into its entirety. +//! This is useful for most types of images. +//! ```rust +//! # use anyhow::Result; +//! # use crate::{mem::DrawAttachments, context::RenderingContext}; +//! fn create_depth_buffer( +//! context: &mut RenderingContext, +//! ) -> Result> { +//! BoundImageView::from_context( +//! context, +//! &ImageSpec { +//! width: 10, +//! height: 10, +//! format: Format::D32Sfloat, +//! usage: Usage::DEPTH_STENCIL_ATTACHMENT, +//! 
}, +//! ) +//! } +/// ``` +use std::mem::ManuallyDrop; + +use crate::{ + context::RenderingContext, + error::LockPoisoned, + mem::{Block, MemoryPool}, + types::*, + utils::get_pixel_size, +}; +use anyhow::{Context, Result}; +use hal::{ + format::{Aspects, Format, Swizzle}, + image::{SamplerDesc, SubresourceRange, Usage, ViewKind}, + memory::SparseFlags, +}; + +pub const COLOR_RESOURCES: SubresourceRange = SubresourceRange { + aspects: Aspects::COLOR, + level_start: 0, + level_count: Some(1), + layer_start: 0, + layer_count: Some(1), +}; + +pub const DEPTH_RESOURCES: SubresourceRange = SubresourceRange { + aspects: Aspects::DEPTH, + level_start: 0, + level_count: Some(1), + layer_start: 0, + layer_count: Some(1), +}; + +/// An image with memory bound to it and an image view into its entirety +/// Memory is allocated from the memory pool P, see [`crate::mem`] +pub struct BoundImageView { + mem: ManuallyDrop, + img: ManuallyDrop, + img_view: ManuallyDrop, + unpadded_row_size: u32, + row_size: u32, + height: u32, +} + +impl BoundImageView

<P: MemoryPool> { + /// Create an uninitialised image using memory from the specified pool + pub fn from_context(context: &mut RenderingContext, spec: &ImageSpec) -> Result<Self> { + // Ensure the memory pool exists before we get a reference to it + context + .ensure_memory_pool::<P>() + .context("Error creating memory pool requested for BoundImageView")?; + let mut allocator = context + .existing_memory_pool::<P>
() + .unwrap() + .write() + .map_err(|_| LockPoisoned::MemoryPool)?; + + let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?; + let row_alignment_mask = context + .physical_device_properties() + .limits + .optimal_buffer_copy_pitch_alignment as u32 + - 1; + Self::from_device_allocator(&mut device, &mut allocator, row_alignment_mask, spec) + } + + /// Create an uninitialised image using memory from the specified pool, but using a much less convenient signature. + /// Use this when you don't have access to the full context. + pub fn from_device_allocator( + device: &mut DeviceT, + pool: &mut P, + row_alignment_mask: u32, + spec: &ImageSpec, + ) -> Result { + // Calculate buffer size & alignment + let initial_row_size = get_pixel_size(spec.format) * spec.width; + let row_size = (initial_row_size + row_alignment_mask) & !row_alignment_mask; + debug_assert!(row_size >= initial_row_size); + + unsafe { + use hal::image::{Kind, Tiling, ViewCapabilities}; + + // Create the image + let mut img = device + .create_image( + Kind::D2(spec.width, spec.height, 1, 1), + 1, + spec.format, + Tiling::Optimal, + spec.usage, + SparseFlags::empty(), + ViewCapabilities::empty(), + ) + .context("Error creating image")?; + + // Get memory requirements + let requirements = device.get_image_requirements(&img); + + // Allocate memory + let (mem, _) = pool + .alloc(&device, requirements.size, requirements.alignment) + .context("Error allocating memory")?; + + // Bind memory + device + .bind_image_memory(mem.memory(), mem.range().start, &mut img) + .context("Error binding memory to image")?; + + // Create image view + let img_view = device + .create_image_view( + &img, + ViewKind::D2, + spec.format, + Swizzle::NO, + spec.usage, + spec.resources.clone(), + ) + .context("Error creating image view")?; + + Ok(Self { + mem: ManuallyDrop::new(mem), + img: ManuallyDrop::new(img), + img_view: ManuallyDrop::new(img_view), + row_size, + height: spec.height, + unpadded_row_size: spec.width, + }) + } + } + + /// Destroy all vulkan objects. Must be called before dropping. + pub fn deactivate_with_context(self, context: &mut RenderingContext) { + let mut device = context + .device() + .write() + .map_err(|_| LockPoisoned::Device) + .unwrap(); + let mut pool = context + .existing_memory_pool::

<P>() + .unwrap() + .write() + .unwrap(); + + self.deactivate_with_device_pool(&mut device, &mut pool); + } + + /// Destroy all vulkan objects. Must be called before dropping. + pub fn deactivate_with_device_pool(self, device: &mut DeviceT, pool: &mut P) { + use std::ptr::read; + unsafe { + device.destroy_image_view(read(&*self.img_view)); + device.destroy_image(read(&*self.img)); + pool.free(&device, read(&*self.mem)); + } + } + + /// Get a reference to the bound image. + pub fn img(&self) -> &ImageT { + &*self.img + } + + /// Get a reference to the view of the bound image. + pub fn img_view(&self) -> &ImageViewT { + &*self.img_view + } + + /// Get a reference to the memory used by the bound image. + pub fn mem(&self) -> &<P as MemoryPool>::Block { + &*self.mem + } + + /// Get the bound image view's row size. + pub fn row_size(&self) -> u32 { + self.row_size + } + + /// Get the bound image view's height. + pub fn height(&self) -> u32 { + self.height + } + + /// Get the bound image view's unpadded row size. + pub fn unpadded_row_size(&self) -> u32 { + self.unpadded_row_size + } +} + +/// A [`self::BoundImageView`] and accompanying sampler. +pub struct SampledImage<P: MemoryPool> { + bound_image: ManuallyDrop<BoundImageView<P>>, + sampler: ManuallyDrop<SamplerT>, +} + +impl<P: MemoryPool> SampledImage<P>
{ + /// Create an uninitialised image and sampler using memory from the specified pool + pub fn from_context( + context: &mut RenderingContext, + spec: &ImageSpec, + sampler_desc: &SamplerDesc, + ) -> Result<Self> { + // Ensure the memory pool exists before we get a reference to it + context + .ensure_memory_pool::<P>() + .context("Error creating memory pool requested for BoundImageView")?; + let mut allocator = context + .existing_memory_pool::<P>
() + .unwrap() + .write() + .map_err(|_| LockPoisoned::MemoryPool)?; + + let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?; + let row_alignment_mask = context + .physical_device_properties() + .limits + .optimal_buffer_copy_pitch_alignment as u32 + - 1; + + Self::from_device_allocator( + &mut device, + &mut allocator, + row_alignment_mask, + spec, + sampler_desc, + ) + } + + /// Create an uninitialised image and sampler using memory from the specified pool, but using a much less convenient signature. + /// Use this when you don't have access to the full context. + pub fn from_device_allocator( + device: &mut DeviceT, + pool: &mut P, + row_alignment_mask: u32, + spec: &ImageSpec, + sampler_desc: &SamplerDesc, + ) -> Result { + let sampler = unsafe { device.create_sampler(sampler_desc) }?; + + Ok(SampledImage { + bound_image: ManuallyDrop::new(BoundImageView::from_device_allocator( + device, + pool, + row_alignment_mask, + spec, + )?), + sampler: ManuallyDrop::new(sampler), + }) + } + + /// Destroy all vulkan objects. Must be called before dropping. + pub fn deactivate_with_context(self, context: &mut RenderingContext) { + let mut device = context + .device() + .write() + .map_err(|_| LockPoisoned::Device) + .unwrap(); + let mut pool = context + .existing_memory_pool::

<P>() + .unwrap() + .write() + .unwrap(); + + self.deactivate_with_device_pool(&mut device, &mut pool); + } + + /// Destroy all vulkan objects. Must be called before dropping. + pub fn deactivate_with_device_pool(self, device: &mut DeviceT, pool: &mut P) { + unsafe { + use std::ptr::read; + read(&*self.bound_image).deactivate_with_device_pool(device, pool); + device.destroy_sampler(read(&*self.sampler)); + } + } + + /// Get a reference to the bound image object. + pub fn bound_image(&self) -> &BoundImageView<P> { + &self.bound_image + } + + /// Get a reference to the bound image. + pub fn img(&self) -> &ImageT { + self.bound_image.img() + } + + /// Get a reference to the view of the bound image. + pub fn img_view(&self) -> &ImageViewT { + self.bound_image.img_view() + } + + /// Get the bound image view's row size. + pub fn row_size(&self) -> u32 { + self.bound_image.row_size() + } + + /// Get the bound image view's unpadded row size. + pub fn unpadded_row_size(&self) -> u32 { + self.bound_image.unpadded_row_size() + } + + /// Get the bound image view's height. + pub fn height(&self) -> u32 { + self.bound_image.height() + } + + /// Get a reference to the memory used by the bound image. + pub fn mem(&self) -> &<P as MemoryPool>
::Block { + self.bound_image.mem() + } + + /// Get a reference to the sampler. + pub fn sampler(&self) -> &SamplerT { + &self.sampler + } +} + +/// Information needed to create an image. +#[derive(Debug, Clone)] +pub struct ImageSpec { + pub width: u32, + pub height: u32, + pub format: Format, + pub usage: Usage, + pub resources: SubresourceRange, +} diff --git a/stockton-skeleton/src/buffers/mod.rs b/stockton-skeleton/src/buffers/mod.rs index 74c5aab..08b2356 100644 --- a/stockton-skeleton/src/buffers/mod.rs +++ b/stockton-skeleton/src/buffers/mod.rs @@ -1,63 +1,6 @@ -//! All sorts of buffers +//! Convenience types wrapping buffers -use std::ops::IndexMut; - -use crate::{error::EnvironmentError, types::*}; - -use anyhow::{Context, Result}; -use hal::{ - buffer::Usage, - memory::{Properties, SparseFlags}, - MemoryTypeId, -}; - -mod dedicated_image; -mod draw_buffers; -mod staged; - -pub use dedicated_image::*; -pub use draw_buffers::*; -pub use staged::*; - -/// Create a buffer of the given specifications, allocating more device memory. -// TODO: Use a different memory allocator? -pub(crate) fn create_buffer( - device: &mut DeviceT, - adapter: &Adapter, - usage: Usage, - properties: Properties, - size: u64, -) -> Result<(BufferT, MemoryT)> { - let mut buffer = unsafe { device.create_buffer(size, usage, SparseFlags::empty()) } - .context("Error creating buffer")?; - - let requirements = unsafe { device.get_buffer_requirements(&buffer) }; - let memory_type_id = adapter - .physical_device - .memory_properties() - .memory_types - .iter() - .enumerate() - .find(|&(id, memory_type)| { - requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(properties) - }) - .map(|(id, _)| MemoryTypeId(id)) - .ok_or(EnvironmentError::NoMemoryTypes)?; - - let memory = unsafe { device.allocate_memory(memory_type_id, requirements.size) } - .context("Error allocating memory")?; - - unsafe { device.bind_buffer_memory(&memory, 0, &mut buffer) } - .context("Error binding memory to buffer")?; - - Ok((buffer, memory)) -} - -/// A buffer that can be modified by the CPU -pub trait ModifiableBuffer: IndexMut { - /// Get a handle to the underlying GPU buffer - fn get_buffer(&mut self) -> &BufferT; - - /// Record the command(s) required to commit changes to this buffer to the given command buffer. - fn record_commit_cmds(&mut self, cmd_buffer: &mut CommandBufferT) -> Result<()>; -} +pub mod draw; +pub mod image; +pub mod staged; +pub mod staging; diff --git a/stockton-skeleton/src/buffers/staged.rs b/stockton-skeleton/src/buffers/staged.rs index 71b5204..ec42102 100644 --- a/stockton-skeleton/src/buffers/staged.rs +++ b/stockton-skeleton/src/buffers/staged.rs @@ -1,7 +1,11 @@ //! A buffer that can be written to by the CPU using staging memory -use super::{create_buffer, ModifiableBuffer}; -use crate::types::*; +use crate::{ + context::RenderingContext, + error::LockPoisoned, + mem::{Block, MappableBlock, MemoryPool}, + types::*, +}; use core::mem::{size_of, ManuallyDrop}; use std::{ @@ -10,72 +14,88 @@ use std::{ }; use anyhow::{Context, Result}; -use hal::{ - buffer::Usage, - command::BufferCopy, - memory::{Properties, Segment}, -}; +use hal::{buffer::Usage, command::BufferCopy, memory::SparseFlags}; -/// A GPU buffer that is written to using a staging buffer -pub struct StagedBuffer<'a, T: Sized> { +/// A GPU buffer that is written to using a staging buffer. The staging buffer and the GPU buffers are the same size, +/// so this isn't optimal in a lot of cases. 
+pub struct StagedBuffer<'a, T: Sized, P: MemoryPool, SP: MemoryPool> { /// CPU-visible buffer staged_buffer: ManuallyDrop<BufferT>, /// CPU-visible memory - staged_memory: ManuallyDrop<MemoryT>, + staged_memory: ManuallyDrop<SP::Block>, /// GPU Buffer buffer: ManuallyDrop<BufferT>, /// GPU Memory - memory: ManuallyDrop<MemoryT>, + memory: ManuallyDrop<P::Block>, /// Where staged buffer is mapped in CPU memory staged_mapped_memory: &'a mut [T], /// The highest index in the buffer that's been written to. - pub highest_used: usize, + highest_used: usize, } -impl<'a, T: Sized> StagedBuffer<'a, T> { - /// size is the size in T - pub fn new(device: &mut DeviceT, adapter: &Adapter, usage: Usage, size: u64) -> Result<Self> { +impl<'a, T, P, SP> StagedBuffer<'a, T, P, SP> +where + T: Sized, + P: MemoryPool, + SP: MemoryPool, + SP::Block: MappableBlock, +{ + /// Create a new staged buffer from the given rendering context. + /// `size` is the size in T. The GPU buffer's usage will be `usage | Usage::TRANSFER_DST` and the staging buffer's usage will be `Usage::TRANSFER_SRC`. + pub fn from_context(context: &mut RenderingContext, usage: Usage, size: u64) -> Result<Self> { // Convert size to bytes let size_bytes = size * size_of::<T>() as u64; - // Get CPU-visible buffer - let (staged_buffer, mut staged_memory) = create_buffer( - device, - adapter, - Usage::TRANSFER_SRC, - Properties::CPU_VISIBLE, - size_bytes, - ) - .context("Error creating staging buffer")?; - - // Get GPU Buffer - let (buffer, memory) = create_buffer( - device, - adapter, - Usage::TRANSFER_DST | usage, - Properties::DEVICE_LOCAL | Properties::COHERENT, - size_bytes, - ) - .context("Error creating GPU buffer")?; - - // Map it somewhere and get a slice to that memory + // Make sure our memory pools exist + context.ensure_memory_pool::<P>()?; + context.ensure_memory_pool::<SP>()?; + + // Lock the device and memory pools + let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?; + let mut mempool = context + .existing_memory_pool::<P>
() + .unwrap() + .write() + .map_err(|_| LockPoisoned::MemoryPool)?; + let mut staging_mempool = context + .existing_memory_pool::() + .unwrap() + .write() + .map_err(|_| LockPoisoned::MemoryPool)?; + + // Staging buffer + let (staged_buffer, mut staged_memory) = unsafe { + create_buffer( + &mut device, + size_bytes, + Usage::TRANSFER_SRC, + &mut *staging_mempool, + ) + .context("Error creating staging buffer")? + }; + + // GPU Buffer + let (buffer, memory) = unsafe { + create_buffer( + &mut device, + size_bytes, + usage | Usage::TRANSFER_DST, + &mut *mempool, + ) + .context("Error creating GPU buffer")? + }; + + // Map the staging buffer somewhere let staged_mapped_memory = unsafe { - let ptr = device - .map_memory( - &mut staged_memory, - Segment { - offset: 0, - size: Some(size_bytes), - }, - ) - .context("Error mapping staged memory")?; - - std::slice::from_raw_parts_mut(ptr as *mut T, size.try_into()?) + std::slice::from_raw_parts_mut( + std::mem::transmute(staged_memory.map(&mut device, 0..size_bytes)?), + size.try_into()?, + ) }; Ok(StagedBuffer { @@ -88,26 +108,39 @@ impl<'a, T: Sized> StagedBuffer<'a, T> { }) } - /// Call this before dropping - pub(crate) fn deactivate(mut self, device: &mut DeviceT) { + /// Destroy all Vulkan objects. Should be called before dropping. + pub fn deactivate(mut self, context: &mut RenderingContext) { unsafe { - device.unmap_memory(&mut self.staged_memory); + let device = &mut *context.device().write().unwrap(); - device.free_memory(ManuallyDrop::take(&mut self.staged_memory)); - device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer)); + self.staged_memory.unmap(device).unwrap(); + + context + .existing_memory_pool::() + .unwrap() + .write() + .unwrap() + .free(device, ManuallyDrop::take(&mut self.staged_memory)); - device.free_memory(ManuallyDrop::take(&mut self.memory)); + context + .existing_memory_pool::

<P>() + .unwrap() + .write() + .unwrap() + .free(device, ManuallyDrop::take(&mut self.memory)); + + device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer)); device.destroy_buffer(ManuallyDrop::take(&mut self.buffer)); }; } -} -impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> { - fn get_buffer(&mut self) -> &BufferT { + /// Get a handle to the underlying GPU buffer + pub fn get_buffer(&mut self) -> &BufferT { &self.buffer } - fn record_commit_cmds(&mut self, buf: &mut CommandBufferT) -> Result<()> { + /// Record the command(s) required to commit changes to this buffer to the given command buffer. + pub fn record_commit_cmds(&mut self, buf: &mut CommandBufferT) -> Result<()> { unsafe { buf.copy_buffer( &self.staged_buffer, @@ -122,9 +155,35 @@ impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> { Ok(()) } + + /// Get the highest byte in this buffer that's been written to (by the CPU) + pub fn highest_used(&self) -> usize { + self.highest_used + } +} + +/// Used internally to create a buffer from a memory pool +unsafe fn create_buffer<P: MemoryPool>( + device: &mut DeviceT, + size: u64, + usage: Usage, + mempool: &mut P, +) -> Result<(BufferT, P::Block)> { + let mut buffer = device + .create_buffer(size, usage, SparseFlags::empty()) + .context("Error creating buffer")?; + let req = device.get_buffer_requirements(&buffer); + + let (memory, _) = mempool.alloc(device, size, req.alignment)?; + + device + .bind_buffer_memory(memory.memory(), 0, &mut buffer) + .context("Error binding memory to buffer")?; + + Ok((buffer, memory)) } -impl<'a, T: Sized> Index<usize> for StagedBuffer<'a, T> { +impl<'a, T: Sized, P: MemoryPool, SP: MemoryPool> Index<usize> for StagedBuffer<'a, T, P, SP> { type Output = T; fn index(&self, index: usize) -> &Self::Output { @@ -132,7 +191,7 @@ impl<'a, T: Sized> Index<usize> for StagedBuffer<'a, T> { } } -impl<'a, T: Sized> IndexMut<usize> for StagedBuffer<'a, T> { +impl<'a, T: Sized, P: MemoryPool, SP: MemoryPool> IndexMut<usize> for StagedBuffer<'a, T, P, SP> { fn index_mut(&mut self, index: usize) -> &mut Self::Output { if index > self.highest_used { self.highest_used = index; diff --git a/stockton-skeleton/src/buffers/staging.rs b/stockton-skeleton/src/buffers/staging.rs new file mode 100644 index 0000000..5c80f51 --- /dev/null +++ b/stockton-skeleton/src/buffers/staging.rs @@ -0,0 +1,103 @@ +//! A buffer that can be written to by the CPU + +use crate::{ + context::RenderingContext, + error::LockPoisoned, + mem::{Block, MappableBlock, MemoryPool}, + types::*, +}; + +use std::{mem::ManuallyDrop, ops::Range}; + +use anyhow::{Context, Result}; +use hal::{buffer::Usage, memory::SparseFlags}; + +/// A buffer that can be written to by the CPU. Usage will be `Usage::TRANSFER_SRC`. +pub struct StagingBuffer<P: MemoryPool> { + buf: ManuallyDrop<BufferT>, + mem: ManuallyDrop<P::Block>, +} + +impl<P> StagingBuffer<P> +where + P: MemoryPool, + P::Block: MappableBlock, +{ + /// Create a new staging buffer from the given RenderingContext. `size` is in bytes. + pub fn from_context(context: &mut RenderingContext, size: u64) -> Result<Self> { + context.ensure_memory_pool::<P>
()?; + + let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?; + let mut mempool = context + .existing_memory_pool() + .unwrap() + .write() + .map_err(|_| LockPoisoned::MemoryPool)?; + + Self::from_device_pool(&mut device, &mut mempool, size) + } + + /// Create a new staging buffer from the given device and memory pool. `size` is in bytes. + pub fn from_device_pool(device: &mut DeviceT, mempool: &mut P, size: u64) -> Result<Self> { + let mut buffer = + unsafe { device.create_buffer(size, Usage::TRANSFER_SRC, SparseFlags::empty()) } + .context("Error creating buffer")?; + + let requirements = unsafe { device.get_buffer_requirements(&buffer) }; + + let (memory, _) = mempool + .alloc(device, requirements.size, requirements.alignment) + .context("Error allocating staging memory")?; + + unsafe { device.bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer) } + .context("Error binding staging memory to buffer")?; + + Ok(StagingBuffer { + buf: ManuallyDrop::new(buffer), + mem: ManuallyDrop::new(memory), + }) + } + + /// Map the given range to CPU-visible memory, returning a pointer to the start of that range. + /// inner_range is local to this block of memory, not to the container as a whole. + pub fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8> { + Ok(<<P as MemoryPool>::Block>::map( + &mut *self.mem, + device, + inner_range, + )?) + } + + /// Remove any mappings present for this staging buffer. + pub unsafe fn unmap(&mut self, device: &mut DeviceT) -> Result<()> { + self.mem.unmap(device) + } + + pub fn deactivate_context(self, context: &mut RenderingContext) { + let mut device = context.device().write().unwrap(); + let mut mempool = context.existing_memory_pool().unwrap().write().unwrap(); + + self.deactivate_device_pool(&mut device, &mut mempool) + } + + /// Destroy all vulkan objects. This should be called before dropping + pub fn deactivate_device_pool(self, device: &mut DeviceT, mempool: &mut P) { + unsafe { + use std::ptr::read; + // Destroy buffer + device.destroy_buffer(read(&*self.buf)); + // Free memory + mempool.free(device, read(&*self.mem)); + } + } + + /// Get a reference to the staging buffer's memory. + pub fn mem(&self) -> &P::Block { + &self.mem + } + + /// Get a reference to the staging buffer. + pub fn buf(&self) -> &ManuallyDrop<BufferT> { + &self.buf + } +} -- cgit v1.2.3
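Usage sketch (not taken from this commit): with the refactor, buffer types are created from a RenderingContext and torn down with an explicit deactivate call, and the caller picks the memory pools as type parameters. The helper function and its name below are hypothetical, and the import paths assume the crate is exposed as `stockton_skeleton`; the trait bounds mirror those on DrawBuffers in buffers/draw.rs.

use anyhow::{Context, Result};
use stockton_skeleton::{
    buffers::draw::DrawBuffers,
    context::RenderingContext,
    mem::{MappableBlock, MemoryPool},
};

/// Hypothetical helper: build vertex/index buffers for any vertex type `T`,
/// allocating from a caller-chosen GPU pool `P` and staging pool `SP`.
fn make_draw_buffers<'a, T, P, SP>(context: &mut RenderingContext) -> Result<DrawBuffers<'a, T, P, SP>>
where
    T: Sized,
    P: MemoryPool,
    SP: MemoryPool,
    SP::Block: MappableBlock,
{
    DrawBuffers::from_context(context).context("Error creating draw buffers")
}

// Teardown goes back through the same context so the blocks return to their pools:
//     draw_buffers.deactivate(context);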