Diffstat (limited to 'stockton-skeleton/src')
-rw-r--r--  stockton-skeleton/src/buffers/dedicated_image.rs  | 134
-rw-r--r--  stockton-skeleton/src/buffers/draw.rs             |  54
-rw-r--r--  stockton-skeleton/src/buffers/draw_buffers.rs     |  43
-rw-r--r--  stockton-skeleton/src/buffers/image.rs            | 342
-rw-r--r--  stockton-skeleton/src/buffers/mod.rs              |  67
-rw-r--r--  stockton-skeleton/src/buffers/staged.rs           | 177
-rw-r--r--  stockton-skeleton/src/buffers/staging.rs          | 103
-rw-r--r--  stockton-skeleton/src/builders/pipeline.rs        |  20
-rw-r--r--  stockton-skeleton/src/context.rs                  |  54
-rw-r--r--  stockton-skeleton/src/error.rs                    |  12
-rw-r--r--  stockton-skeleton/src/lib.rs                      |   1
-rw-r--r--  stockton-skeleton/src/mem.rs                      | 368
-rw-r--r--  stockton-skeleton/src/texture/block.rs            |  45
-rw-r--r--  stockton-skeleton/src/texture/load.rs             | 223
-rw-r--r--  stockton-skeleton/src/texture/loader.rs           | 259
-rw-r--r--  stockton-skeleton/src/texture/mod.rs              |   1
-rw-r--r--  stockton-skeleton/src/texture/repo.rs             |  80
-rw-r--r--  stockton-skeleton/src/texture/staging_buffer.rs   |  59
-rw-r--r--  stockton-skeleton/src/utils.rs                    |   6
19 files changed, 1328 insertions(+), 720 deletions(-)
diff --git a/stockton-skeleton/src/buffers/dedicated_image.rs b/stockton-skeleton/src/buffers/dedicated_image.rs
deleted file mode 100644
index bf49a38..0000000
--- a/stockton-skeleton/src/buffers/dedicated_image.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-//! A dedicated image. Used for depth buffers.
-
-use crate::texture::PIXEL_SIZE;
-use crate::types::*;
-
-use std::mem::ManuallyDrop;
-
-use anyhow::{Context, Result};
-use hal::{
- format::{Format, Swizzle},
- image::{SubresourceRange, Usage, Usage as ImgUsage, ViewKind},
- memory,
- memory::Properties,
- MemoryTypeId,
-};
-use thiserror::Error;
-
-/// Holds an image that's loaded into GPU memory dedicated only to that image, bypassing the memory allocator.
-pub struct DedicatedLoadedImage {
- /// The GPU Image handle
- image: ManuallyDrop<ImageT>,
-
- /// The full view of the image
- pub image_view: ManuallyDrop<ImageViewT>,
-
- /// The memory backing the image
- memory: ManuallyDrop<MemoryT>,
-}
-
-#[derive(Debug, Error)]
-pub enum ImageLoadError {
- #[error("No suitable memory type for image memory")]
- NoMemoryTypes,
-}
-
-impl DedicatedLoadedImage {
- pub fn new(
- device: &mut DeviceT,
- adapter: &Adapter,
- format: Format,
- usage: Usage,
- resources: SubresourceRange,
- width: usize,
- height: usize,
- ) -> Result<DedicatedLoadedImage> {
- let (memory, image_ref) = {
- // Round up the size to align properly
- let initial_row_size = PIXEL_SIZE * width;
- let limits = adapter.physical_device.properties().limits;
- let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1;
-
- let row_size =
- ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize;
- debug_assert!(row_size as usize >= initial_row_size);
-
- // Make the image
- let mut image_ref = unsafe {
- use hal::image::{Kind, Tiling, ViewCapabilities};
-
- device.create_image(
- Kind::D2(width as u32, height as u32, 1, 1),
- 1,
- format,
- Tiling::Optimal,
- usage,
- memory::SparseFlags::empty(),
- ViewCapabilities::empty(),
- )
- }
- .context("Error creating image")?;
-
- // Allocate memory
- let memory = unsafe {
- let requirements = device.get_image_requirements(&image_ref);
-
- let memory_type_id = adapter
- .physical_device
- .memory_properties()
- .memory_types
- .iter()
- .enumerate()
- .find(|&(id, memory_type)| {
- requirements.type_mask & (1 << id) != 0
- && memory_type.properties.contains(Properties::DEVICE_LOCAL)
- })
- .map(|(id, _)| MemoryTypeId(id))
- .ok_or(ImageLoadError::NoMemoryTypes)?;
-
- let memory = device
- .allocate_memory(memory_type_id, requirements.size)
- .context("Error allocating memory for image")?;
-
- device
- .bind_image_memory(&memory, 0, &mut image_ref)
- .context("Error binding memory to image")?;
-
- memory
- };
-
- (memory, image_ref)
- };
-
- // Create ImageView and sampler
- let image_view = unsafe {
- device.create_image_view(
- &image_ref,
- ViewKind::D2,
- format,
- Swizzle::NO,
- ImgUsage::DEPTH_STENCIL_ATTACHMENT,
- resources,
- )
- }
- .context("Error creating image view")?;
-
- Ok(DedicatedLoadedImage {
- image: ManuallyDrop::new(image_ref),
- image_view: ManuallyDrop::new(image_view),
- memory: ManuallyDrop::new(memory),
- })
- }
-
- /// Properly frees/destroys all the objects in this struct
- /// Dropping without doing this is a bad idea
- pub fn deactivate(self, device: &mut DeviceT) {
- unsafe {
- use core::ptr::read;
-
- device.destroy_image_view(ManuallyDrop::into_inner(read(&self.image_view)));
- device.destroy_image(ManuallyDrop::into_inner(read(&self.image)));
- device.free_memory(ManuallyDrop::into_inner(read(&self.memory)));
- }
- }
-}
diff --git a/stockton-skeleton/src/buffers/draw.rs b/stockton-skeleton/src/buffers/draw.rs
new file mode 100644
index 0000000..cd571e3
--- /dev/null
+++ b/stockton-skeleton/src/buffers/draw.rs
@@ -0,0 +1,54 @@
+//! A vertex and index buffer set for drawing
+
+use super::staged::StagedBuffer;
+use crate::{
+ context::RenderingContext,
+ mem::{MappableBlock, MemoryPool},
+};
+
+use anyhow::{Context, Result};
+use hal::buffer::Usage;
+use std::mem::ManuallyDrop;
+
+/// Initial size of vertex buffer. TODO: Way of overriding this
+pub const INITIAL_VERT_SIZE: u64 = 3 * 3000;
+
+/// Initial size of index buffer. TODO: Way of overriding this
+pub const INITIAL_INDEX_SIZE: u64 = 3000;
+
+/// A vertex and index buffer set for drawing
+pub struct DrawBuffers<'a, T: Sized, P: MemoryPool, SP: MemoryPool> {
+ pub vertex_buffer: ManuallyDrop<StagedBuffer<'a, T, P, SP>>,
+ pub index_buffer: ManuallyDrop<StagedBuffer<'a, (u16, u16, u16), P, SP>>,
+}
+
+impl<'a, T, P, SP> DrawBuffers<'a, T, P, SP>
+where
+ P: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
+    /// Create a new set of draw buffers from the given rendering context.
+ /// This will allocate memory from `P` and `SP`, and currently has a fixed size (WIP).
+ pub fn from_context(context: &mut RenderingContext) -> Result<Self> {
+ let vert = StagedBuffer::from_context(context, Usage::VERTEX, INITIAL_VERT_SIZE)
+ .context("Error creating vertex buffer")?;
+ let index = StagedBuffer::from_context(context, Usage::INDEX, INITIAL_INDEX_SIZE)
+ .context("Error creating index buffer")?;
+
+ Ok(DrawBuffers {
+ vertex_buffer: ManuallyDrop::new(vert),
+ index_buffer: ManuallyDrop::new(index),
+ })
+ }
+
+ /// Destroy all Vulkan objects. Should be called before dropping.
+ pub fn deactivate(self, context: &mut RenderingContext) {
+ unsafe {
+ use core::ptr::read;
+
+ ManuallyDrop::into_inner(read(&self.vertex_buffer)).deactivate(context);
+ ManuallyDrop::into_inner(read(&self.index_buffer)).deactivate(context);
+ }
+ }
+}
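
For illustration, a minimal sketch of constructing these buffers, assuming the default `DataPool` and `StagingPool` pools from `crate::mem` (feature `rendy-pools`) and a hypothetical `UvPoint` vertex type:

```rust
use anyhow::Result;
use stockton_skeleton::{
    buffers::draw::DrawBuffers,
    context::RenderingContext,
    mem::{DataPool, StagingPool},
};

/// Hypothetical vertex layout: XYZ position plus UV coordinates.
#[repr(C)]
struct UvPoint([f32; 3], [f32; 2]);

/// Allocates both the GPU-side and staging buffers, lazily initialising
/// the pools on first use.
fn make_draw_buffers(
    context: &mut RenderingContext,
) -> Result<DrawBuffers<'static, UvPoint, DataPool, StagingPool>> {
    DrawBuffers::from_context(context)
}
```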
diff --git a/stockton-skeleton/src/buffers/draw_buffers.rs b/stockton-skeleton/src/buffers/draw_buffers.rs
deleted file mode 100644
index 5baec92..0000000
--- a/stockton-skeleton/src/buffers/draw_buffers.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-//! A vertex and index buffer set for drawing
-
-use super::StagedBuffer;
-use crate::types::*;
-
-use anyhow::{Context, Result};
-use hal::buffer::Usage;
-use std::mem::ManuallyDrop;
-
-/// Initial size of vertex buffer. TODO: Way of overriding this
-pub const INITIAL_VERT_SIZE: u64 = 3 * 3000;
-
-/// Initial size of index buffer. TODO: Way of overriding this
-pub const INITIAL_INDEX_SIZE: u64 = 3000;
-
-/// The buffers used for drawing, ie index and vertex buffer
-pub struct DrawBuffers<'a, T: Sized> {
- pub vertex_buffer: ManuallyDrop<StagedBuffer<'a, T>>,
- pub index_buffer: ManuallyDrop<StagedBuffer<'a, (u16, u16, u16)>>,
-}
-
-impl<'a, T> DrawBuffers<'a, T> {
- pub fn new(device: &mut DeviceT, adapter: &Adapter) -> Result<DrawBuffers<'a, T>> {
- let vert = StagedBuffer::new(device, adapter, Usage::VERTEX, INITIAL_VERT_SIZE)
- .context("Error creating vertex buffer")?;
- let index = StagedBuffer::new(device, adapter, Usage::INDEX, INITIAL_INDEX_SIZE)
- .context("Error creating index buffer")?;
-
- Ok(DrawBuffers {
- vertex_buffer: ManuallyDrop::new(vert),
- index_buffer: ManuallyDrop::new(index),
- })
- }
-
- pub fn deactivate(self, device: &mut DeviceT) {
- unsafe {
- use core::ptr::read;
-
- ManuallyDrop::into_inner(read(&self.vertex_buffer)).deactivate(device);
- ManuallyDrop::into_inner(read(&self.index_buffer)).deactivate(device);
- }
- }
-}
diff --git a/stockton-skeleton/src/buffers/image.rs b/stockton-skeleton/src/buffers/image.rs
new file mode 100644
index 0000000..34a0a37
--- /dev/null
+++ b/stockton-skeleton/src/buffers/image.rs
@@ -0,0 +1,342 @@
+//! An image with memory bound to it and an image view into its entirety.
+//! This is useful for most types of images.
+//! ```rust
+//! # use anyhow::Result;
+//! # use hal::{format::Format, image::Usage};
+//! # use crate::{
+//! #     buffers::image::{BoundImageView, ImageSpec, DEPTH_RESOURCES},
+//! #     context::RenderingContext,
+//! #     mem::DrawAttachments,
+//! # };
+//! fn create_depth_buffer(
+//!     context: &mut RenderingContext,
+//! ) -> Result<BoundImageView<DrawAttachments>> {
+//!     BoundImageView::from_context(
+//!         context,
+//!         &ImageSpec {
+//!             width: 10,
+//!             height: 10,
+//!             format: Format::D32Sfloat,
+//!             usage: Usage::DEPTH_STENCIL_ATTACHMENT,
+//!             resources: DEPTH_RESOURCES,
+//!         },
+//!     )
+//! }
+//! ```
+use std::mem::ManuallyDrop;
+
+use crate::{
+ context::RenderingContext,
+ error::LockPoisoned,
+ mem::{Block, MemoryPool},
+ types::*,
+ utils::get_pixel_size,
+};
+use anyhow::{Context, Result};
+use hal::{
+ format::{Aspects, Format, Swizzle},
+ image::{SamplerDesc, SubresourceRange, Usage, ViewKind},
+ memory::SparseFlags,
+};
+
+pub const COLOR_RESOURCES: SubresourceRange = SubresourceRange {
+ aspects: Aspects::COLOR,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
+};
+
+pub const DEPTH_RESOURCES: SubresourceRange = SubresourceRange {
+ aspects: Aspects::DEPTH,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
+};
+
+/// An image with memory bound to it and an image view into its entirety.
+/// Memory is allocated from the memory pool `P`; see [`crate::mem`].
+pub struct BoundImageView<P: MemoryPool> {
+ mem: ManuallyDrop<P::Block>,
+ img: ManuallyDrop<ImageT>,
+ img_view: ManuallyDrop<ImageViewT>,
+ unpadded_row_size: u32,
+ row_size: u32,
+ height: u32,
+}
+
+impl<P: MemoryPool> BoundImageView<P> {
+ /// Create an uninitialised image using memory from the specified pool
+ pub fn from_context(context: &mut RenderingContext, spec: &ImageSpec) -> Result<Self> {
+ // Ensure the memory pool exists before we get a reference to it
+ context
+ .ensure_memory_pool::<P>()
+ .context("Error creating memory pool requested for BoundImageView")?;
+ let mut allocator = context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
+ let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let row_alignment_mask = context
+ .physical_device_properties()
+ .limits
+ .optimal_buffer_copy_pitch_alignment as u32
+ - 1;
+ Self::from_device_allocator(&mut device, &mut allocator, row_alignment_mask, spec)
+ }
+
+ /// Create an uninitialised image using memory from the specified pool, but using a much less convenient signature.
+ /// Use this when you don't have access to the full context.
+ pub fn from_device_allocator(
+ device: &mut DeviceT,
+ pool: &mut P,
+ row_alignment_mask: u32,
+ spec: &ImageSpec,
+ ) -> Result<Self> {
+ // Calculate buffer size & alignment
+ let initial_row_size = get_pixel_size(spec.format) * spec.width;
+ let row_size = (initial_row_size + row_alignment_mask) & !row_alignment_mask;
+ debug_assert!(row_size >= initial_row_size);
+
+ unsafe {
+ use hal::image::{Kind, Tiling, ViewCapabilities};
+
+ // Create the image
+ let mut img = device
+ .create_image(
+ Kind::D2(spec.width, spec.height, 1, 1),
+ 1,
+ spec.format,
+ Tiling::Optimal,
+ spec.usage,
+ SparseFlags::empty(),
+ ViewCapabilities::empty(),
+ )
+ .context("Error creating image")?;
+
+ // Get memory requirements
+ let requirements = device.get_image_requirements(&img);
+
+ // Allocate memory
+ let (mem, _) = pool
+ .alloc(&device, requirements.size, requirements.alignment)
+ .context("Error allocating memory")?;
+
+ // Bind memory
+ device
+ .bind_image_memory(mem.memory(), mem.range().start, &mut img)
+ .context("Error binding memory to image")?;
+
+ // Create image view
+ let img_view = device
+ .create_image_view(
+ &img,
+ ViewKind::D2,
+ spec.format,
+ Swizzle::NO,
+ spec.usage,
+ spec.resources.clone(),
+ )
+ .context("Error creating image view")?;
+
+ Ok(Self {
+ mem: ManuallyDrop::new(mem),
+ img: ManuallyDrop::new(img),
+ img_view: ManuallyDrop::new(img_view),
+ row_size,
+ height: spec.height,
+                unpadded_row_size: initial_row_size,
+ })
+ }
+ }
+
+ /// Destroy all vulkan objects. Must be called before dropping.
+ pub fn deactivate_with_context(self, context: &mut RenderingContext) {
+ let mut device = context
+ .device()
+ .write()
+ .map_err(|_| LockPoisoned::Device)
+ .unwrap();
+ let mut pool = context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .unwrap();
+
+ self.deactivate_with_device_pool(&mut device, &mut pool);
+ }
+
+ /// Destroy all vulkan objects. Must be called before dropping.
+ pub fn deactivate_with_device_pool(self, device: &mut DeviceT, pool: &mut P) {
+ use std::ptr::read;
+ unsafe {
+ device.destroy_image_view(read(&*self.img_view));
+ device.destroy_image(read(&*self.img));
+ pool.free(&device, read(&*self.mem));
+ }
+ }
+
+ /// Get a reference to the bound image.
+ pub fn img(&self) -> &ImageT {
+ &*self.img
+ }
+
+ /// Get a reference to the view of the bound image.
+ pub fn img_view(&self) -> &ImageViewT {
+ &*self.img_view
+ }
+
+ /// Get a reference to the memory used by the bound image.
+ pub fn mem(&self) -> &<P as MemoryPool>::Block {
+ &*self.mem
+ }
+
+ /// Get the bound image view's row size.
+ pub fn row_size(&self) -> u32 {
+ self.row_size
+ }
+
+ /// Get the bound image view's height.
+ pub fn height(&self) -> u32 {
+ self.height
+ }
+
+ /// Get the bound image view's unpadded row size.
+ pub fn unpadded_row_size(&self) -> u32 {
+ self.unpadded_row_size
+ }
+}
+
+/// A [`self::BoundImageView`] and accompanying sampler.
+pub struct SampledImage<P: MemoryPool> {
+ bound_image: ManuallyDrop<BoundImageView<P>>,
+ sampler: ManuallyDrop<SamplerT>,
+}
+
+impl<P: MemoryPool> SampledImage<P> {
+ /// Create an uninitialised image using memory from the specified pool
+ pub fn from_context(
+ context: &mut RenderingContext,
+ spec: &ImageSpec,
+ sampler_desc: &SamplerDesc,
+ ) -> Result<Self> {
+ // Ensure the memory pool exists before we get a reference to it
+ context
+ .ensure_memory_pool::<P>()
+            .context("Error creating memory pool requested for SampledImage")?;
+ let mut allocator = context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
+ let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let row_alignment_mask = context
+ .physical_device_properties()
+ .limits
+ .optimal_buffer_copy_pitch_alignment as u32
+ - 1;
+
+ Self::from_device_allocator(
+ &mut device,
+ &mut allocator,
+ row_alignment_mask,
+ spec,
+ sampler_desc,
+ )
+ }
+
+ /// Create an uninitialised image and sampler using memory from the specified pool, but using a much less convenient signature.
+ /// Use this when you don't have access to the full context.
+ pub fn from_device_allocator(
+ device: &mut DeviceT,
+ pool: &mut P,
+ row_alignment_mask: u32,
+ spec: &ImageSpec,
+ sampler_desc: &SamplerDesc,
+ ) -> Result<Self> {
+ let sampler = unsafe { device.create_sampler(sampler_desc) }?;
+
+ Ok(SampledImage {
+ bound_image: ManuallyDrop::new(BoundImageView::from_device_allocator(
+ device,
+ pool,
+ row_alignment_mask,
+ spec,
+ )?),
+ sampler: ManuallyDrop::new(sampler),
+ })
+ }
+
+ /// Destroy all vulkan objects. Must be called before dropping.
+ pub fn deactivate_with_context(self, context: &mut RenderingContext) {
+ let mut device = context
+ .device()
+ .write()
+ .map_err(|_| LockPoisoned::Device)
+ .unwrap();
+ let mut pool = context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .unwrap();
+
+ self.deactivate_with_device_pool(&mut device, &mut pool);
+ }
+
+ /// Destroy all vulkan objects. Must be called before dropping.
+ pub fn deactivate_with_device_pool(self, device: &mut DeviceT, pool: &mut P) {
+ unsafe {
+ use std::ptr::read;
+ read(&*self.bound_image).deactivate_with_device_pool(device, pool);
+ device.destroy_sampler(read(&*self.sampler));
+ }
+ }
+
+ /// Get a reference to the bound image object.
+ pub fn bound_image(&self) -> &BoundImageView<P> {
+ &self.bound_image
+ }
+
+ /// Get a reference to the bound image.
+ pub fn img(&self) -> &ImageT {
+ self.bound_image.img()
+ }
+
+ /// Get a reference to the view of the bound image.
+ pub fn img_view(&self) -> &ImageViewT {
+ self.bound_image.img_view()
+ }
+
+ /// Get the bound image view's row size.
+ pub fn row_size(&self) -> u32 {
+ self.bound_image.row_size()
+ }
+
+ /// Get the bound image view's unpadded row size.
+ pub fn unpadded_row_size(&self) -> u32 {
+ self.bound_image.unpadded_row_size()
+ }
+
+ /// Get the bound image view's height.
+ pub fn height(&self) -> u32 {
+ self.bound_image.height()
+ }
+
+ /// Get a reference to the memory used by the bound image.
+ pub fn mem(&self) -> &<P as MemoryPool>::Block {
+ self.bound_image.mem()
+ }
+
+ /// Get a reference to the sampler.
+ pub fn sampler(&self) -> &SamplerT {
+ &self.sampler
+ }
+}
+
+/// Information needed to create an image.
+#[derive(Debug, Clone)]
+pub struct ImageSpec {
+ pub width: u32,
+ pub height: u32,
+ pub format: Format,
+ pub usage: Usage,
+ pub resources: SubresourceRange,
+}
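
As a usage sketch (not part of the diff), creating a sampled colour texture target with the default `TexturesPool`; the dimensions and sampler settings here are arbitrary:

```rust
use anyhow::Result;
use hal::{
    format::Format,
    image::{Filter, SamplerDesc, Usage, WrapMode},
};
use stockton_skeleton::{
    buffers::image::{ImageSpec, SampledImage, COLOR_RESOURCES},
    context::RenderingContext,
    mem::TexturesPool,
};

fn make_texture_target(context: &mut RenderingContext) -> Result<SampledImage<TexturesPool>> {
    SampledImage::from_context(
        context,
        &ImageSpec {
            width: 256,
            height: 256,
            format: Format::Rgba8Srgb,
            usage: Usage::TRANSFER_DST | Usage::SAMPLED,
            resources: COLOR_RESOURCES,
        },
        // Bilinear filtering, repeating texture coordinates.
        &SamplerDesc::new(Filter::Linear, WrapMode::Tile),
    )
}
```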
diff --git a/stockton-skeleton/src/buffers/mod.rs b/stockton-skeleton/src/buffers/mod.rs
index 74c5aab..08b2356 100644
--- a/stockton-skeleton/src/buffers/mod.rs
+++ b/stockton-skeleton/src/buffers/mod.rs
@@ -1,63 +1,6 @@
-//! All sorts of buffers
+//! Convenience types wrapping buffers
-use std::ops::IndexMut;
-
-use crate::{error::EnvironmentError, types::*};
-
-use anyhow::{Context, Result};
-use hal::{
- buffer::Usage,
- memory::{Properties, SparseFlags},
- MemoryTypeId,
-};
-
-mod dedicated_image;
-mod draw_buffers;
-mod staged;
-
-pub use dedicated_image::*;
-pub use draw_buffers::*;
-pub use staged::*;
-
-/// Create a buffer of the given specifications, allocating more device memory.
-// TODO: Use a different memory allocator?
-pub(crate) fn create_buffer(
- device: &mut DeviceT,
- adapter: &Adapter,
- usage: Usage,
- properties: Properties,
- size: u64,
-) -> Result<(BufferT, MemoryT)> {
- let mut buffer = unsafe { device.create_buffer(size, usage, SparseFlags::empty()) }
- .context("Error creating buffer")?;
-
- let requirements = unsafe { device.get_buffer_requirements(&buffer) };
- let memory_type_id = adapter
- .physical_device
- .memory_properties()
- .memory_types
- .iter()
- .enumerate()
- .find(|&(id, memory_type)| {
- requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(properties)
- })
- .map(|(id, _)| MemoryTypeId(id))
- .ok_or(EnvironmentError::NoMemoryTypes)?;
-
- let memory = unsafe { device.allocate_memory(memory_type_id, requirements.size) }
- .context("Error allocating memory")?;
-
- unsafe { device.bind_buffer_memory(&memory, 0, &mut buffer) }
- .context("Error binding memory to buffer")?;
-
- Ok((buffer, memory))
-}
-
-/// A buffer that can be modified by the CPU
-pub trait ModifiableBuffer: IndexMut<usize> {
- /// Get a handle to the underlying GPU buffer
- fn get_buffer(&mut self) -> &BufferT;
-
- /// Record the command(s) required to commit changes to this buffer to the given command buffer.
- fn record_commit_cmds(&mut self, cmd_buffer: &mut CommandBufferT) -> Result<()>;
-}
+pub mod draw;
+pub mod image;
+pub mod staged;
+pub mod staging;
diff --git a/stockton-skeleton/src/buffers/staged.rs b/stockton-skeleton/src/buffers/staged.rs
index 71b5204..ec42102 100644
--- a/stockton-skeleton/src/buffers/staged.rs
+++ b/stockton-skeleton/src/buffers/staged.rs
@@ -1,7 +1,11 @@
//! A buffer that can be written to by the CPU using staging memory
-use super::{create_buffer, ModifiableBuffer};
-use crate::types::*;
+use crate::{
+ context::RenderingContext,
+ error::LockPoisoned,
+ mem::{Block, MappableBlock, MemoryPool},
+ types::*,
+};
use core::mem::{size_of, ManuallyDrop};
use std::{
@@ -10,72 +14,88 @@ use std::{
};
use anyhow::{Context, Result};
-use hal::{
- buffer::Usage,
- command::BufferCopy,
- memory::{Properties, Segment},
-};
+use hal::{buffer::Usage, command::BufferCopy, memory::SparseFlags};
-/// A GPU buffer that is written to using a staging buffer
-pub struct StagedBuffer<'a, T: Sized> {
+/// A GPU buffer that is written to using a staging buffer. The staging buffer and the GPU buffers are the same size,
+/// so this isn't optimal in a lot of cases.
+pub struct StagedBuffer<'a, T: Sized, P: MemoryPool, SP: MemoryPool> {
/// CPU-visible buffer
staged_buffer: ManuallyDrop<BufferT>,
/// CPU-visible memory
- staged_memory: ManuallyDrop<MemoryT>,
+ staged_memory: ManuallyDrop<SP::Block>,
/// GPU Buffer
buffer: ManuallyDrop<BufferT>,
/// GPU Memory
- memory: ManuallyDrop<MemoryT>,
+ memory: ManuallyDrop<P::Block>,
/// Where staged buffer is mapped in CPU memory
staged_mapped_memory: &'a mut [T],
/// The highest index in the buffer that's been written to.
- pub highest_used: usize,
+ highest_used: usize,
}
-impl<'a, T: Sized> StagedBuffer<'a, T> {
- /// size is the size in T
- pub fn new(device: &mut DeviceT, adapter: &Adapter, usage: Usage, size: u64) -> Result<Self> {
+impl<'a, T, P, SP> StagedBuffer<'a, T, P, SP>
+where
+ T: Sized,
+ P: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
+    /// Create a new staged buffer from the given rendering context.
+ /// `size` is the size in T. The GPU buffer's usage will be `usage | Usage::TRANSFER_DST` and the staging buffer's usage will be `Usage::TRANSFER_SRC`.
+ pub fn from_context(context: &mut RenderingContext, usage: Usage, size: u64) -> Result<Self> {
// Convert size to bytes
let size_bytes = size * size_of::<T>() as u64;
- // Get CPU-visible buffer
- let (staged_buffer, mut staged_memory) = create_buffer(
- device,
- adapter,
- Usage::TRANSFER_SRC,
- Properties::CPU_VISIBLE,
- size_bytes,
- )
- .context("Error creating staging buffer")?;
-
- // Get GPU Buffer
- let (buffer, memory) = create_buffer(
- device,
- adapter,
- Usage::TRANSFER_DST | usage,
- Properties::DEVICE_LOCAL | Properties::COHERENT,
- size_bytes,
- )
- .context("Error creating GPU buffer")?;
-
- // Map it somewhere and get a slice to that memory
+ // Make sure our memory pools exist
+ context.ensure_memory_pool::<P>()?;
+ context.ensure_memory_pool::<SP>()?;
+
+ // Lock the device and memory pools
+ let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let mut mempool = context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+ let mut staging_mempool = context
+ .existing_memory_pool::<SP>()
+ .unwrap()
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
+ // Staging buffer
+ let (staged_buffer, mut staged_memory) = unsafe {
+ create_buffer(
+ &mut device,
+ size_bytes,
+ Usage::TRANSFER_SRC,
+ &mut *staging_mempool,
+ )
+ .context("Error creating staging buffer")?
+ };
+
+ // GPU Buffer
+ let (buffer, memory) = unsafe {
+ create_buffer(
+ &mut device,
+ size_bytes,
+ usage | Usage::TRANSFER_DST,
+ &mut *mempool,
+ )
+ .context("Error creating GPU buffer")?
+ };
+
+ // Map the staging buffer somewhere
let staged_mapped_memory = unsafe {
- let ptr = device
- .map_memory(
- &mut staged_memory,
- Segment {
- offset: 0,
- size: Some(size_bytes),
- },
- )
- .context("Error mapping staged memory")?;
-
- std::slice::from_raw_parts_mut(ptr as *mut T, size.try_into()?)
+ std::slice::from_raw_parts_mut(
+ std::mem::transmute(staged_memory.map(&mut device, 0..size_bytes)?),
+ size.try_into()?,
+ )
};
Ok(StagedBuffer {
@@ -88,26 +108,39 @@ impl<'a, T: Sized> StagedBuffer<'a, T> {
})
}
- /// Call this before dropping
- pub(crate) fn deactivate(mut self, device: &mut DeviceT) {
+ /// Destroy all Vulkan objects. Should be called before dropping.
+ pub fn deactivate(mut self, context: &mut RenderingContext) {
unsafe {
- device.unmap_memory(&mut self.staged_memory);
+ let device = &mut *context.device().write().unwrap();
- device.free_memory(ManuallyDrop::take(&mut self.staged_memory));
- device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer));
+ self.staged_memory.unmap(device).unwrap();
+
+ context
+ .existing_memory_pool::<SP>()
+ .unwrap()
+ .write()
+ .unwrap()
+ .free(device, ManuallyDrop::take(&mut self.staged_memory));
- device.free_memory(ManuallyDrop::take(&mut self.memory));
+ context
+ .existing_memory_pool::<P>()
+ .unwrap()
+ .write()
+ .unwrap()
+ .free(device, ManuallyDrop::take(&mut self.memory));
+
+ device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer));
device.destroy_buffer(ManuallyDrop::take(&mut self.buffer));
};
}
-}
-impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
- fn get_buffer(&mut self) -> &BufferT {
+ /// Get a handle to the underlying GPU buffer
+ pub fn get_buffer(&mut self) -> &BufferT {
&self.buffer
}
- fn record_commit_cmds(&mut self, buf: &mut CommandBufferT) -> Result<()> {
+ /// Record the command(s) required to commit changes to this buffer to the given command buffer.
+ pub fn record_commit_cmds(&mut self, buf: &mut CommandBufferT) -> Result<()> {
unsafe {
buf.copy_buffer(
&self.staged_buffer,
@@ -122,9 +155,35 @@ impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
Ok(())
}
+
+    /// Get the highest index in this buffer that's been written to (by the CPU)
+ pub fn highest_used(&self) -> usize {
+ self.highest_used
+ }
+}
+
+/// Used internally to create a buffer from a memory pool
+unsafe fn create_buffer<P: MemoryPool>(
+ device: &mut DeviceT,
+ size: u64,
+ usage: Usage,
+ mempool: &mut P,
+) -> Result<(BufferT, P::Block)> {
+ let mut buffer = device
+ .create_buffer(size, usage, SparseFlags::empty())
+ .context("Error creating buffer")?;
+ let req = device.get_buffer_requirements(&buffer);
+
+    let (memory, _) = mempool.alloc(device, req.size, req.alignment)?;
+
+    device
+        .bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer)
+        .context("Error binding memory to buffer")?;
+
+ Ok((buffer, memory))
}
-impl<'a, T: Sized> Index<usize> for StagedBuffer<'a, T> {
+impl<'a, T: Sized, P: MemoryPool, SP: MemoryPool> Index<usize> for StagedBuffer<'a, T, P, SP> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
@@ -132,7 +191,7 @@ impl<'a, T: Sized> Index<usize> for StagedBuffer<'a, T> {
}
}
-impl<'a, T: Sized> IndexMut<usize> for StagedBuffer<'a, T> {
+impl<'a, T: Sized, P: MemoryPool, SP: MemoryPool> IndexMut<usize> for StagedBuffer<'a, T, P, SP> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
if index > self.highest_used {
self.highest_used = index;
diff --git a/stockton-skeleton/src/buffers/staging.rs b/stockton-skeleton/src/buffers/staging.rs
new file mode 100644
index 0000000..5c80f51
--- /dev/null
+++ b/stockton-skeleton/src/buffers/staging.rs
@@ -0,0 +1,103 @@
+//! A buffer that can be written to by the CPU
+
+use crate::{
+ context::RenderingContext,
+ error::LockPoisoned,
+ mem::{Block, MappableBlock, MemoryPool},
+ types::*,
+};
+
+use std::{mem::ManuallyDrop, ops::Range};
+
+use anyhow::{Context, Result};
+use hal::{buffer::Usage, memory::SparseFlags};
+
+/// A buffer that can be written to by the CPU. Usage will be `Usage::TRANSFER_SRC`.
+pub struct StagingBuffer<P: MemoryPool> {
+ buf: ManuallyDrop<BufferT>,
+ mem: ManuallyDrop<P::Block>,
+}
+
+impl<P> StagingBuffer<P>
+where
+ P: MemoryPool,
+ P::Block: MappableBlock,
+{
+ /// Create a new staging buffer from the given RenderingContext. `size` is in bytes.
+ pub fn from_context(context: &mut RenderingContext, size: u64) -> Result<Self> {
+ context.ensure_memory_pool::<P>()?;
+
+ let mut device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let mut mempool = context
+ .existing_memory_pool()
+ .unwrap()
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
+ Self::from_device_pool(&mut device, &mut mempool, size)
+ }
+
+ /// Create a new staging buffer from the given device and memory pool. `size` is in bytes.
+ pub fn from_device_pool(device: &mut DeviceT, mempool: &mut P, size: u64) -> Result<Self> {
+ let mut buffer =
+ unsafe { device.create_buffer(size, Usage::TRANSFER_SRC, SparseFlags::empty()) }
+ .context("Error creating buffer")?;
+
+ let requirements = unsafe { device.get_buffer_requirements(&buffer) };
+
+ let (memory, _) = mempool
+ .alloc(device, requirements.size, requirements.alignment)
+ .context("Error allocating staging memory")?;
+
+ unsafe { device.bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer) }
+ .context("Error binding staging memory to buffer")?;
+
+ Ok(StagingBuffer {
+ buf: ManuallyDrop::new(buffer),
+ mem: ManuallyDrop::new(memory),
+ })
+ }
+
+ /// Map the given range to CPU-visible memory, returning a pointer to the start of that range.
+ /// inner_range is local to this block of memory, not to the container as a whole.
+ pub fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8> {
+ Ok(<<P as MemoryPool>::Block>::map(
+ &mut *self.mem,
+ device,
+ inner_range,
+ )?)
+ }
+
+ /// Remove any mappings present for this staging buffer.
+ pub unsafe fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
+ self.mem.unmap(device)
+ }
+
+    /// Destroy all vulkan objects, using the device and pool from the given context. This should be called before dropping.
+    pub fn deactivate_context(self, context: &mut RenderingContext) {
+ let mut device = context.device().write().unwrap();
+ let mut mempool = context.existing_memory_pool().unwrap().write().unwrap();
+
+ self.deactivate_device_pool(&mut device, &mut mempool)
+ }
+
+ /// Destroy all vulkan objects. This should be called before dropping
+ pub fn deactivate_device_pool(self, device: &mut DeviceT, mempool: &mut P) {
+ unsafe {
+ use std::ptr::read;
+ // Destroy buffer
+ device.destroy_buffer(read(&*self.buf));
+ // Free memory
+ mempool.free(device, read(&*self.mem));
+ }
+ }
+
+ /// Get a reference to the staging buffer's memory.
+ pub fn mem(&self) -> &P::Block {
+ &self.mem
+ }
+
+ /// Get a reference to the staging buffer.
+ pub fn buf(&self) -> &ManuallyDrop<BufferT> {
+ &self.buf
+ }
+}
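
A sketch of filling a `StagingBuffer` with raw bytes, assuming the default `StagingPool`; `unmap` is unsafe because any pointer returned by `map` dangles afterwards:

```rust
use anyhow::Result;
use stockton_skeleton::{
    buffers::staging::StagingBuffer, context::RenderingContext, mem::StagingPool,
};

fn upload_bytes(
    context: &mut RenderingContext,
    data: &[u8],
) -> Result<StagingBuffer<StagingPool>> {
    let mut buf: StagingBuffer<StagingPool> =
        StagingBuffer::from_context(context, data.len() as u64)?;

    let mut device = context.device().write().unwrap();

    // Map only the range we need; the pointer is valid until unmap.
    let ptr = buf.map(&mut device, 0..data.len() as u64)?;
    unsafe {
        std::ptr::copy_nonoverlapping(data.as_ptr(), ptr, data.len());
        buf.unmap(&mut device)?;
    }
    Ok(buf)
}
```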
diff --git a/stockton-skeleton/src/builders/pipeline.rs b/stockton-skeleton/src/builders/pipeline.rs
index f68d9d6..9f4937f 100644
--- a/stockton-skeleton/src/builders/pipeline.rs
+++ b/stockton-skeleton/src/builders/pipeline.rs
@@ -1,5 +1,7 @@
use super::{renderpass::RenderpassSpec, shader::ShaderDesc};
-use crate::{error::EnvironmentError, target::SwapchainProperties, types::*};
+use crate::{
+ error::EnvironmentError, target::SwapchainProperties, types::*, utils::get_pixel_size,
+};
use std::{mem::ManuallyDrop, ops::Range};
@@ -33,23 +35,15 @@ impl VertexBufferSpec {
format: *format,
},
});
- offset += get_size(*format);
+ offset += get_pixel_size(*format);
}
v
}
pub fn stride(&self) -> ElemStride {
- self.attributes.iter().fold(0, |x, f| x + get_size(*f))
- }
-}
-
-fn get_size(f: Format) -> u32 {
- match f {
- Format::Rgb32Sfloat => 4 * 3,
- Format::R32Sint => 4,
- Format::Rg32Sfloat => 4 * 2,
- Format::Rgba32Sfloat => 4 * 4,
- _ => unimplemented!("dont know size of format {:?}", f),
+ self.attributes
+ .iter()
+ .fold(0, |x, f| x + get_pixel_size(*f))
}
}
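
For reference, a hedged sketch of what the shared helper does, assuming `get_pixel_size` mirrors the removed `get_size` (bytes per element of a `hal` `Format`):

```rust
use hal::format::Format;
use stockton_skeleton::utils::get_pixel_size;

/// The same fold as `VertexBufferSpec::stride` above: total bytes per vertex.
fn vertex_stride(attributes: &[Format]) -> u32 {
    attributes.iter().fold(0, |acc, f| acc + get_pixel_size(*f))
}

// For example, position + UV:
// vertex_stride(&[Format::Rgb32Sfloat, Format::Rg32Sfloat]) == 12 + 8
```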
diff --git a/stockton-skeleton/src/context.rs b/stockton-skeleton/src/context.rs
index 5e2d3a3..be627f2 100644
--- a/stockton-skeleton/src/context.rs
+++ b/stockton-skeleton/src/context.rs
@@ -2,13 +2,15 @@
//! This relies on draw passes for the actual drawing logic.
use std::{
+ any::{Any, TypeId},
+ collections::HashMap,
mem::ManuallyDrop,
ptr::read,
sync::{Arc, RwLock},
};
use anyhow::{Context, Result};
-use hal::pool::CommandPoolCreateFlags;
+use hal::{pool::CommandPoolCreateFlags, PhysicalDeviceProperties};
use log::debug;
use winit::window::Window;
@@ -21,16 +23,15 @@ use super::{
use crate::{
draw_passes::Singular,
error::{EnvironmentError, LockPoisoned},
+ mem::MemoryPool,
types::*,
};
use stockton_types::Session;
-/// Contains all the hal related stuff.
-/// In the end, this takes in a depth-sorted list of faces and a map file and renders them.
-// TODO: Settings for clear colour, buffer sizes, etc
+/// Contains most root vulkan objects, and some precalculated info such as best formats to use.
+/// In most cases, this and the DrawPass should contain all vulkan objects present.
pub struct RenderingContext {
- // Parents for most of these things
/// Vulkan Instance
instance: ManuallyDrop<back::Instance>,
@@ -40,6 +41,9 @@ pub struct RenderingContext {
/// Adapter we're using
adapter: Adapter,
+ /// The properties of the physical device we're using
+ physical_device_properties: PhysicalDeviceProperties,
+
/// Swapchain and stuff
target_chain: ManuallyDrop<TargetChain>,
@@ -47,12 +51,17 @@ pub struct RenderingContext {
/// The command pool used for our buffers
cmd_pool: ManuallyDrop<CommandPoolT>,
+ /// The queue negotiator to use
queue_negotiator: QueueNegotiator,
/// The queue to use for drawing
queue: Arc<RwLock<QueueT>>,
+ /// Number of pixels per standard point
pixels_per_point: f32,
+
+ /// The list of memory pools
+ memory_pools: HashMap<TypeId, Box<dyn Any>>,
}
impl RenderingContext {
@@ -164,6 +173,7 @@ impl RenderingContext {
instance: ManuallyDrop::new(instance),
device: device_lock,
+ physical_device_properties: adapter.physical_device.properties(),
adapter,
queue_negotiator,
@@ -172,8 +182,8 @@ impl RenderingContext {
target_chain: ManuallyDrop::new(target_chain),
cmd_pool: ManuallyDrop::new(cmd_pool),
- // pixels_per_point: window.scale_factor() as f32,
pixels_per_point: window.scale_factor() as f32,
+ memory_pools: HashMap::new(),
})
}
@@ -259,6 +269,36 @@ impl RenderingContext {
pub fn queue_negotiator_mut(&mut self) -> &mut QueueNegotiator {
&mut self.queue_negotiator
}
+
+ /// Get a reference to the physical device's properties.
+ pub fn physical_device_properties(&self) -> &PhysicalDeviceProperties {
+ &self.physical_device_properties
+ }
+
+ /// Get the specified memory pool, lazily initialising it if it's not yet present
+ pub fn memory_pool<'a, P: MemoryPool>(&'a mut self) -> Result<&'a Arc<RwLock<P>>> {
+ self.ensure_memory_pool::<P>()?;
+ Ok(self.existing_memory_pool::<P>().unwrap())
+ }
+
+ /// Ensure the specified memory pool is initialised.
+ pub fn ensure_memory_pool<P: MemoryPool>(&mut self) -> Result<()> {
+ let tid = TypeId::of::<P>();
+ if !self.memory_pools.contains_key(&tid) {
+ self.memory_pools
+ .insert(tid, Box::new(P::from_context(self)?));
+ }
+ Ok(())
+ }
+
+    /// Get the specified memory pool, returning `None` if it's not yet present.
+    /// You should only use this when you're certain it exists, such as when freeing memory
+    /// allocated from that pool.
+ pub fn existing_memory_pool<P: MemoryPool>(&self) -> Option<&Arc<RwLock<P>>> {
+ self.memory_pools
+ .get(&TypeId::of::<P>())
+ .map(|x| x.downcast_ref().unwrap())
+ }
}
impl core::ops::Drop for RenderingContext {
@@ -267,6 +307,8 @@ impl core::ops::Drop for RenderingContext {
self.device.write().unwrap().wait_idle().unwrap();
}
+ // TODO: Better deactivation code
+
unsafe {
let mut device = self.device.write().unwrap();
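
A sketch of the lazy pool-initialisation pattern this adds: `memory_pool` combines `ensure_memory_pool` with the lookup, while `existing_memory_pool` alone suits free paths where the pool must already exist.

```rust
use anyhow::Result;
use stockton_skeleton::{context::RenderingContext, mem::StagingPool};

fn pool_demo(context: &mut RenderingContext) -> Result<()> {
    // First call creates the pool and stores it keyed by TypeId; later
    // calls return the same Arc.
    let pool = context.memory_pool::<StagingPool>()?.clone();
    drop(pool.write().expect("memory pool lock poisoned"));

    // Fine here, since the pool is guaranteed to exist by now.
    assert!(context.existing_memory_pool::<StagingPool>().is_some());
    Ok(())
}
```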
diff --git a/stockton-skeleton/src/error.rs b/stockton-skeleton/src/error.rs
index 1f57892..6d7e7ad 100644
--- a/stockton-skeleton/src/error.rs
+++ b/stockton-skeleton/src/error.rs
@@ -15,6 +15,9 @@ pub enum LockPoisoned {
#[error("Other lock poisoned")]
Other,
+
+ #[error("Memory pool lock poisoned")]
+ MemoryPool,
}
/// Indicates the given property has no acceptable values
@@ -43,6 +46,15 @@ pub enum EnvironmentError {
#[error("No suitable queues")]
NoQueues,
+
+ #[error("Memory pool missing")]
+ MemoryPoolMissing,
+}
+
+#[derive(Debug, Error)]
+pub enum UsageError {
+ #[error("Attempt to create mappable memory block from non-mappable memory")]
+ NonMappableMemory,
}
/// Indicates an issue with the level object being used
diff --git a/stockton-skeleton/src/lib.rs b/stockton-skeleton/src/lib.rs
index 3212873..785fb30 100644
--- a/stockton-skeleton/src/lib.rs
+++ b/stockton-skeleton/src/lib.rs
@@ -11,6 +11,7 @@ pub mod builders;
pub mod context;
pub mod draw_passes;
pub mod error;
+pub mod mem;
pub mod queue_negotiator;
mod target;
pub mod texture;
diff --git a/stockton-skeleton/src/mem.rs b/stockton-skeleton/src/mem.rs
new file mode 100644
index 0000000..af0a42b
--- /dev/null
+++ b/stockton-skeleton/src/mem.rs
@@ -0,0 +1,368 @@
+//! Provides access to different memory 'pools'.
+//! Ideally, each pool is optimised for a specific use case.
+//! You can implement your own pools using whatever algorithm you'd like; just implement [`MemoryPool`] and optionally [`Block`],
+//! then access the pool through [`RenderingContext::memory_pool`].
+//! Alternatively, some default memory pools are available when the feature `rendy-pools` is enabled (on by default).
+
+use crate::{context::RenderingContext, types::*};
+
+use std::{
+ ops::Range,
+ sync::{Arc, RwLock},
+};
+
+use anyhow::Result;
+use hal::memory::Properties;
+
+/// An allocator whose memory and allocation pattern is optimised for a specific use case.
+pub trait MemoryPool: Send + Sync + 'static {
+ /// The block returned by this pool
+ type Block: Block + Send + Sync;
+
+ /// Create a new memory pool from the given context
+ /// This is called to lazily initialise the memory pool when it is first requested.
+ /// It can do any sort of filtering on memory types required.
+ fn from_context(context: &RenderingContext) -> Result<Arc<RwLock<Self>>>;
+
+ /// Allocate block of memory.
+ /// On success returns allocated block and amount of memory consumed from device.
+ /// The returned block must not overlap with any other allocated block, the start of it must be `0 mod(align)`,
+ /// and it must be at least `size` bytes.
+ fn alloc(&mut self, device: &DeviceT, size: u64, align: u64) -> Result<(Self::Block, u64)>;
+
+ /// Free block of memory.
+ /// Returns amount of memory returned to the device.
+ /// If the given block was not allocated from this pool, this should be a no-op and should return 0.
+ fn free(&mut self, device: &DeviceT, block: Self::Block) -> u64;
+
+ /// Deactivate this memory pool, freeing any allocated memory objects.
+ fn deactivate(self, context: &mut RenderingContext);
+}
+
+/// Block that owns a `Range` of the `Memory`.
+/// Provides access to safe memory range mapping.
+pub trait Block {
+ /// Get memory properties of the block.
+ fn properties(&self) -> Properties;
+
+ /// Get raw memory object.
+ fn memory(&self) -> &MemoryT;
+
+ /// Get memory range owned by this block.
+ fn range(&self) -> Range<u64>;
+
+ /// Get size of the block.
+ fn size(&self) -> u64 {
+ let range = self.range();
+ range.end - range.start
+ }
+}
+
+/// An additional trait for [`Block`]s that can be mapped to CPU-visible memory.
+///
+/// This should only be implemented for blocks that are *guaranteed* to be visible to the CPU
+/// and may panic if this is not the case.
+pub trait MappableBlock: Block {
+ /// Attempt to map this block to CPU-visible memory.
+ /// `inner_range` is counted from only inside this block, not the wider memory object this block is a part of
+ fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8>;
+
+ /// Unmap this block from CPU-visible memory.
+ /// If this block is not mapped, this should be a no-op.
+ /// Implementors should ensure that this does not accidentally unmap other blocks using the same memory block.
+ fn unmap(&mut self, device: &mut DeviceT) -> Result<()>;
+}
+
+#[cfg(feature = "rendy-pools")]
+mod rendy {
+ use super::*;
+
+ use crate::{
+ error::{EnvironmentError, LockPoisoned, UsageError},
+ utils::find_memory_type_id,
+ };
+
+ use anyhow::{anyhow, Context, Result};
+ use hal::{
+ format::Format,
+ memory::{Properties as MemProps, SparseFlags},
+ };
+ use rendy_memory::{Allocator, Block as RBlock, DynamicAllocator, DynamicBlock, DynamicConfig};
+
+ /// So we can use rendy blocks as our blocks
+ impl<T: RBlock<back::Backend>> Block for T {
+ fn properties(&self) -> Properties {
+ <T as RBlock<back::Backend>>::properties(&self)
+ }
+
+ fn memory(&self) -> &MemoryT {
+ <T as RBlock<back::Backend>>::memory(&self)
+ }
+
+ fn range(&self) -> Range<u64> {
+ <T as RBlock<back::Backend>>::range(&self)
+ }
+ }
+
+ /// Intended to be used for textures.
+ /// The allocated memory is guaranteed to be suitable for any colour image with optimal tiling and no extra sparse flags or view capabilities.
+ pub struct TexturesPool(DynamicAllocator<back::Backend>);
+ impl MemoryPool for TexturesPool {
+ type Block = DynamicBlock<back::Backend>;
+
+ fn alloc(&mut self, device: &DeviceT, size: u64, align: u64) -> Result<(Self::Block, u64)> {
+ Ok(self.0.alloc(device, size, align)?)
+ }
+
+ fn free(&mut self, device: &DeviceT, block: Self::Block) -> u64 {
+ self.0.free(device, block)
+ }
+
+ fn from_context(context: &RenderingContext) -> Result<Arc<RwLock<Self>>> {
+ let type_mask = unsafe {
+ use hal::image::{Kind, Tiling, Usage, ViewCapabilities};
+
+ // We create an empty image with the same format as used for textures
+ // this is to get the type_mask required, which will stay the same for
+ // all colour images of the same tiling. (certain memory flags excluded).
+
+ // Size and alignment don't necessarily stay the same, so we're forced to
+ // guess at the alignment for our allocator.
+ let device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let img = device
+ .create_image(
+ Kind::D2(16, 16, 1, 1),
+ 1,
+ Format::Rgba8Srgb,
+ Tiling::Optimal,
+ Usage::SAMPLED,
+ SparseFlags::empty(),
+ ViewCapabilities::empty(),
+ )
+ .context("Error creating test image to get buffer settings")?;
+
+ let type_mask = device.get_image_requirements(&img).type_mask;
+
+ device.destroy_image(img);
+
+ type_mask
+ };
+
+ let allocator = {
+ let props = MemProps::DEVICE_LOCAL;
+
+ DynamicAllocator::new(
+ find_memory_type_id(context.adapter(), type_mask, props)
+ .ok_or(EnvironmentError::NoMemoryTypes)?,
+ props,
+ DynamicConfig {
+ block_size_granularity: 4 * 32 * 32, // 32x32 image
+ max_chunk_size: u64::pow(2, 63),
+ min_device_allocation: 4 * 32 * 32,
+ },
+ context
+ .physical_device_properties()
+ .limits
+ .non_coherent_atom_size as u64,
+ )
+ };
+
+ Ok(Arc::new(RwLock::new(Self(allocator))))
+ }
+
+ fn deactivate(self, _context: &mut RenderingContext) {
+ self.0.dispose();
+ }
+ }
+
+ /// Used for depth buffers.
+ /// Memory returned is guaranteed to be suitable for any image using `context.target_chain().properties().depth_format` with optimal tiling, and no sparse flags or view capabilities.
+ pub struct DepthBufferPool(DynamicAllocator<back::Backend>);
+ impl MemoryPool for DepthBufferPool {
+ type Block = DynamicBlock<back::Backend>;
+
+ fn alloc(&mut self, device: &DeviceT, size: u64, align: u64) -> Result<(Self::Block, u64)> {
+ Ok(self.0.alloc(device, size, align)?)
+ }
+
+ fn free(&mut self, device: &DeviceT, block: Self::Block) -> u64 {
+ self.0.free(device, block)
+ }
+
+ fn from_context(context: &RenderingContext) -> Result<Arc<RwLock<Self>>> {
+ let type_mask = unsafe {
+ use hal::image::{Kind, Tiling, Usage, ViewCapabilities};
+
+ let device = context.device().write().map_err(|_| LockPoisoned::Device)?;
+ let img = device
+ .create_image(
+ Kind::D2(16, 16, 1, 1),
+ 1,
+ context.target_chain().properties().depth_format,
+ Tiling::Optimal,
+ Usage::SAMPLED,
+ SparseFlags::empty(),
+ ViewCapabilities::empty(),
+ )
+ .context("Error creating test image to get buffer settings")?;
+
+ let type_mask = device.get_image_requirements(&img).type_mask;
+
+ device.destroy_image(img);
+
+ type_mask
+ };
+
+ let allocator = {
+ let props = MemProps::DEVICE_LOCAL;
+
+ DynamicAllocator::new(
+ find_memory_type_id(context.adapter(), type_mask, props)
+ .ok_or(EnvironmentError::NoMemoryTypes)?,
+ props,
+ DynamicConfig {
+ block_size_granularity: 4 * 32 * 32, // 32x32 image
+ max_chunk_size: u64::pow(2, 63),
+ min_device_allocation: 4 * 32 * 32,
+ },
+ context
+ .physical_device_properties()
+ .limits
+ .non_coherent_atom_size as u64,
+ )
+ };
+
+ Ok(Arc::new(RwLock::new(Self(allocator))))
+ }
+
+ fn deactivate(self, _context: &mut RenderingContext) {
+ self.0.dispose()
+ }
+ }
+
+ /// Used for staging buffers
+ pub struct StagingPool(DynamicAllocator<back::Backend>);
+ impl MemoryPool for StagingPool {
+ type Block = MappableRBlock<DynamicBlock<back::Backend>>;
+
+ fn alloc(&mut self, device: &DeviceT, size: u64, align: u64) -> Result<(Self::Block, u64)> {
+ let (b, size) = self.0.alloc(device, size, align)?;
+ Ok((MappableRBlock::new_unchecked(b), size))
+ }
+
+ fn free(&mut self, device: &DeviceT, block: Self::Block) -> u64 {
+ self.0.free(device, block.0)
+ }
+
+ fn from_context(context: &RenderingContext) -> Result<Arc<RwLock<Self>>> {
+ let allocator = {
+ let props = MemProps::CPU_VISIBLE | MemProps::COHERENT;
+ let t = find_memory_type_id(context.adapter(), u32::MAX, props)
+ .ok_or(EnvironmentError::NoMemoryTypes)?;
+ DynamicAllocator::new(
+ t,
+ props,
+ DynamicConfig {
+ block_size_granularity: 4 * 32 * 32, // 32x32 image
+ max_chunk_size: u64::pow(2, 63),
+ min_device_allocation: 4 * 32 * 32,
+ },
+ context
+ .physical_device_properties()
+ .limits
+ .non_coherent_atom_size as u64,
+ )
+ };
+
+ Ok(Arc::new(RwLock::new(StagingPool(allocator))))
+ }
+
+ fn deactivate(self, _context: &mut RenderingContext) {
+ self.0.dispose()
+ }
+ }
+
+ /// Suitable for input data, such as vertices and indices.
+ pub struct DataPool(DynamicAllocator<back::Backend>);
+ impl MemoryPool for DataPool {
+ type Block = DynamicBlock<back::Backend>;
+
+ fn alloc(&mut self, device: &DeviceT, size: u64, align: u64) -> Result<(Self::Block, u64)> {
+ Ok(self.0.alloc(device, size, align)?)
+ }
+
+ fn free(&mut self, device: &DeviceT, block: Self::Block) -> u64 {
+ self.0.free(device, block)
+ }
+
+ fn from_context(context: &RenderingContext) -> Result<Arc<RwLock<Self>>> {
+ let allocator = {
+ let props = MemProps::CPU_VISIBLE | MemProps::COHERENT;
+ let t = find_memory_type_id(context.adapter(), u32::MAX, props)
+ .ok_or(EnvironmentError::NoMemoryTypes)?;
+ DynamicAllocator::new(
+ t,
+ props,
+ DynamicConfig {
+ block_size_granularity: 4 * 4 * 128, // 128 f32 XYZ[?] vertices
+ max_chunk_size: u64::pow(2, 63),
+ min_device_allocation: 4 * 4 * 128,
+ },
+ context
+ .physical_device_properties()
+ .limits
+ .non_coherent_atom_size as u64,
+ )
+ };
+
+ Ok(Arc::new(RwLock::new(DataPool(allocator))))
+ }
+
+ fn deactivate(self, _context: &mut RenderingContext) {
+ self.0.dispose()
+ }
+ }
+
+ /// A rendy memory block that is guaranteed to be CPU visible.
+ pub struct MappableRBlock<B: RBlock<back::Backend>>(B);
+ impl<B: RBlock<back::Backend>> MappableRBlock<B> {
+ /// Create a new mappable memory block, returning an error if the block is not CPU visible
+ pub fn new(block: B) -> Result<Self> {
+ if !block.properties().contains(MemProps::CPU_VISIBLE) {
+ return Err(anyhow!(UsageError::NonMappableMemory));
+ }
+ Ok(Self::new_unchecked(block))
+ }
+
+ /// Create a new mappable memory block, without checking if the block is CPU visible.
+ pub fn new_unchecked(block: B) -> Self {
+ Self(block)
+ }
+ }
+
+ impl<B: RBlock<back::Backend>> Block for MappableRBlock<B> {
+ fn properties(&self) -> MemProps {
+ self.0.properties()
+ }
+
+ fn memory(&self) -> &MemoryT {
+ self.0.memory()
+ }
+
+ fn range(&self) -> Range<u64> {
+ self.0.range()
+ }
+ }
+ impl<B: RBlock<back::Backend>> MappableBlock for MappableRBlock<B> {
+ fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8> {
+ unsafe { Ok(self.0.map(device, inner_range)?.ptr().as_mut()) }
+ }
+
+ fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
+ Ok(self.0.unmap(device))
+ }
+ }
+}
+
+#[cfg(feature = "rendy-pools")]
+pub use rendy::*;
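
A sketch of the raw alloc/free contract, assuming the `StagingPool` above; most code should go through the higher-level buffer types instead:

```rust
use anyhow::Result;
use stockton_skeleton::{
    context::RenderingContext,
    error::LockPoisoned,
    mem::{Block, MemoryPool, StagingPool},
};

fn alloc_scratch(context: &mut RenderingContext) -> Result<()> {
    let pool_lock = context.memory_pool::<StagingPool>()?.clone();
    let device_lock = context.device().clone();

    let mut pool = pool_lock.write().map_err(|_| LockPoisoned::MemoryPool)?;
    let device = device_lock.read().map_err(|_| LockPoisoned::Device)?;

    // alloc returns the block plus how much new memory was taken from the
    // device (0 when served from an already-allocated chunk).
    let (block, _consumed) = pool.alloc(&device, 1024, 4)?;
    debug_assert!(block.size() >= 1024);

    // Blocks must go back to the pool they came from; free on the wrong
    // pool is specified to be a no-op returning 0.
    pool.free(&device, block);
    Ok(())
}
```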
diff --git a/stockton-skeleton/src/texture/block.rs b/stockton-skeleton/src/texture/block.rs
index 5ac3a94..1b195c2 100644
--- a/stockton-skeleton/src/texture/block.rs
+++ b/stockton-skeleton/src/texture/block.rs
@@ -1,21 +1,22 @@
use super::{loader::BlockRef, repo::BLOCK_SIZE};
-use crate::types::*;
+use crate::{buffers::image::SampledImage, mem::MemoryPool, types::*};
use arrayvec::ArrayVec;
-use rendy_memory::{Allocator, Block};
use std::{iter::once, mem::ManuallyDrop};
-pub struct TexturesBlock<B: Block<back::Backend>> {
+/// A block of loaded textures
+pub struct TexturesBlock<TP: MemoryPool> {
pub id: BlockRef,
pub descriptor_set: ManuallyDrop<RDescriptorSet>,
- pub imgs: ArrayVec<[LoadedImage<B>; BLOCK_SIZE]>,
+ pub imgs: ArrayVec<[SampledImage<TP>; BLOCK_SIZE]>,
}
-impl<B: Block<back::Backend>> TexturesBlock<B> {
- pub fn deactivate<T: Allocator<back::Backend, Block = B>>(
+impl<TP: MemoryPool> TexturesBlock<TP> {
+ /// Destroy all Vulkan objects. Must be called before dropping.
+ pub fn deactivate(
mut self,
device: &mut DeviceT,
- tex_alloc: &mut T,
+ tex_alloc: &mut TP,
desc_alloc: &mut DescriptorAllocator,
) {
unsafe {
@@ -27,36 +28,8 @@ impl<B: Block<back::Backend>> TexturesBlock<B> {
// Images
self.imgs
.drain(..)
- .map(|x| x.deactivate(device, tex_alloc))
+ .map(|x| x.deactivate_with_device_pool(device, tex_alloc))
.for_each(|_| {});
}
}
}
-
-pub struct LoadedImage<B: Block<back::Backend>> {
- pub mem: ManuallyDrop<B>,
- pub img: ManuallyDrop<ImageT>,
- pub img_view: ManuallyDrop<ImageViewT>,
- pub sampler: ManuallyDrop<SamplerT>,
- pub row_size: usize,
- pub height: u32,
- pub width: u32,
-}
-
-impl<B: Block<back::Backend>> LoadedImage<B> {
- pub fn deactivate<T: Allocator<back::Backend, Block = B>>(
- self,
- device: &mut DeviceT,
- alloc: &mut T,
- ) {
- unsafe {
- use std::ptr::read;
-
- device.destroy_image_view(read(&*self.img_view));
- device.destroy_image(read(&*self.img));
- device.destroy_sampler(read(&*self.sampler));
-
- alloc.free(device, read(&*self.mem));
- }
- }
-}
diff --git a/stockton-skeleton/src/texture/load.rs b/stockton-skeleton/src/texture/load.rs
index 1f33ad5..6cb4f4d 100644
--- a/stockton-skeleton/src/texture/load.rs
+++ b/stockton-skeleton/src/texture/load.rs
@@ -1,31 +1,31 @@
-use super::{
- block::LoadedImage, block::TexturesBlock, repo::BLOCK_SIZE, resolver::TextureResolver,
- staging_buffer::StagingBuffer, LoadableImage, PIXEL_SIZE,
+use std::sync::{Arc, RwLock};
+
+use super::{block::TexturesBlock, repo::BLOCK_SIZE, resolver::TextureResolver, LoadableImage};
+use crate::{
+ buffers::{
+ image::{ImageSpec, SampledImage, COLOR_RESOURCES},
+ staging::StagingBuffer,
+ },
+ error::LockPoisoned,
+ mem::{Block, MappableBlock, MemoryPool},
+ types::*,
};
-use crate::types::*;
use anyhow::{Context, Result};
use arrayvec::ArrayVec;
use hal::{
- format::{Aspects, Format, Swizzle},
+ format::{Aspects, Format},
image::{
- Filter, SamplerDesc, SubresourceLayers, SubresourceRange, Usage as ImgUsage, ViewKind,
- WrapMode,
+ Filter, SamplerDesc, SubresourceLayers, SubresourceRange, Usage as ImgUsage, WrapMode,
},
- memory::SparseFlags,
- MemoryTypeId,
};
-use rendy_memory::{Allocator, Block};
-use std::mem::ManuallyDrop;
use thiserror::Error;
-#[derive(Error, Debug)]
-pub enum TextureLoadError {
- #[error("No available resources")]
- NoResources,
-}
-
+/// The format used by the texture repo
+// TODO: This should be customisable.
pub const FORMAT: Format = Format::Rgba8Srgb;
+
+/// The resources used by each texture, i.e. one colour aspect.
pub const RESOURCES: SubresourceRange = SubresourceRange {
aspects: Aspects::COLOR,
level_start: 0,
@@ -33,159 +33,112 @@ pub const RESOURCES: SubresourceRange = SubresourceRange {
layer_start: 0,
layer_count: Some(1),
};
+
+/// The layers used by each texture, i.e. one colour layer.
pub const LAYERS: SubresourceLayers = SubresourceLayers {
aspects: Aspects::COLOR,
level: 0,
layers: 0..1,
};
+/// Configuration required to load a texture
pub struct TextureLoadConfig<R: TextureResolver> {
+ /// The resolver to use
pub resolver: R,
+
+ /// How to sample the image
pub filter: Filter,
+
+ /// How to deal with texture coordinates outside the image.
pub wrap_mode: WrapMode,
}
-pub struct QueuedLoad<B: Block<back::Backend>> {
+/// A texture load that has been queued, and is finished when the fence triggers.
+pub struct QueuedLoad<TP: MemoryPool, SP: MemoryPool> {
pub fence: FenceT,
pub buf: CommandBufferT,
- pub block: TexturesBlock<B>,
- pub staging_bufs: ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
+ pub block: TexturesBlock<TP>,
+ pub staging_bufs: ArrayVec<[StagingBuffer<SP>; BLOCK_SIZE]>,
}
-impl<B: Block<back::Backend>> QueuedLoad<B> {
+impl<TP: MemoryPool, SP: MemoryPool> QueuedLoad<TP, SP> {
+ /// Break down into a tuple
pub fn dissolve(
self,
) -> (
(FenceT, CommandBufferT),
- ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
- TexturesBlock<B>,
+ ArrayVec<[StagingBuffer<SP>; BLOCK_SIZE]>,
+ TexturesBlock<TP>,
) {
((self.fence, self.buf), self.staging_bufs, self.block)
}
}
-pub fn tex_size_info<T: LoadableImage>(img: &T, obcpa: hal::buffer::Offset) -> (usize, usize) {
- let initial_row_size = PIXEL_SIZE * img.width() as usize;
- let row_alignment_mask = obcpa as u32 - 1;
-
- let row_size = ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize;
- let total_size = (row_size * (img.height() as usize)) as u64;
- debug_assert!(row_size as usize >= initial_row_size);
-
- (row_size, total_size as usize)
-}
-
-pub fn create_image_view<T, I>(
+/// Create a SampledImage for the given LoadableImage, and load the image data into a StagingBuffer
+/// Note that this doesn't queue up transferring from the buffer to the image.
+pub unsafe fn load_image<I, R, SP, TP>(
device: &mut DeviceT,
- allocator: &mut T,
- format: Format,
- usage: ImgUsage,
- img: &I,
-) -> Result<(T::Block, ImageT)>
+ staging_allocator: &Arc<RwLock<SP>>,
+ tex_allocator: &Arc<RwLock<TP>>,
+ obcpa: u32,
+ img_data: I,
+ config: &TextureLoadConfig<R>,
+) -> Result<(StagingBuffer<SP>, SampledImage<TP>)>
where
- T: Allocator<back::Backend>,
I: LoadableImage,
+ R: TextureResolver,
+ SP: MemoryPool,
+ TP: MemoryPool,
+ SP::Block: MappableBlock,
{
- // Make the image
- let mut image_ref = unsafe {
- use hal::image::{Kind, Tiling, ViewCapabilities};
-
- device.create_image(
- Kind::D2(img.width(), img.height(), 1, 1),
- 1,
- format,
- Tiling::Optimal,
- usage,
- SparseFlags::empty(),
- ViewCapabilities::empty(),
- )
- }
- .context("Error creating image")?;
+ // Create sampled image
+ let sampled_image = {
+ let mut tex_allocator = tex_allocator
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
+ SampledImage::from_device_allocator(
+ device,
+ &mut *tex_allocator,
+ obcpa as u32,
+ &ImageSpec {
+ width: img_data.width(),
+ height: img_data.height(),
+ format: FORMAT,
+ usage: ImgUsage::TRANSFER_DST | ImgUsage::SAMPLED,
+ resources: COLOR_RESOURCES,
+ },
+ &SamplerDesc::new(config.filter, config.wrap_mode),
+ )?
+ };
- // Allocate memory
- let (block, _) = unsafe {
- let requirements = device.get_image_requirements(&image_ref);
-
- allocator.alloc(device, requirements.size, requirements.alignment)
- }
- .context("Error allocating memory")?;
-
- unsafe {
- device
- .bind_image_memory(block.memory(), block.range().start, &mut image_ref)
- .context("Error binding memory to image")?;
- }
+ // Create staging buffer
+ let total_size = sampled_image.bound_image().mem().size();
- Ok((block, image_ref))
-}
+ let mut staging_buffer = {
+ let mut staging_allocator = staging_allocator
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
-pub unsafe fn load_image<I: LoadableImage, R: TextureResolver>(
- device: &mut DeviceT,
- staging_allocator: &mut DynamicAllocator,
- tex_allocator: &mut DynamicAllocator,
- staging_memory_type: MemoryTypeId,
- obcpa: u64,
- img_data: I,
- config: &TextureLoadConfig<R>,
-) -> Result<(StagingBuffer, LoadedImage<DynamicBlock>)> {
- // Calculate buffer size
- let (row_size, total_size) = tex_size_info(&img_data, obcpa);
-
- // Create staging buffer
- let mut staging_buffer = StagingBuffer::new(
- device,
- staging_allocator,
- total_size as u64,
- staging_memory_type,
- )
- .context("Error creating staging buffer")?;
+ StagingBuffer::from_device_pool(device, &mut *staging_allocator, total_size as u64)
+ .context("Error creating staging buffer")?
+ };
// Write to staging buffer
let mapped_memory = staging_buffer
- .map_memory(device)
+ .map(device, 0..total_size)
.context("Error mapping staged memory")?;
- img_data.copy_into(mapped_memory, row_size);
-
- staging_buffer.unmap_memory(device);
-
- // Create image
- let (img_mem, img) = create_image_view(
- device,
- tex_allocator,
- FORMAT,
- ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST,
- &img_data,
- )
- .context("Error creating image")?;
-
- // Create image view
- let img_view = device
- .create_image_view(
- &img,
- ViewKind::D2,
- FORMAT,
- Swizzle::NO,
- ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST,
- RESOURCES,
- )
- .context("Error creating image view")?;
-
- // Create sampler
- let sampler = device
- .create_sampler(&SamplerDesc::new(config.filter, config.wrap_mode))
- .context("Error creating sampler")?;
-
- Ok((
- staging_buffer,
- LoadedImage {
- mem: ManuallyDrop::new(img_mem),
- img: ManuallyDrop::new(img),
- img_view: ManuallyDrop::new(img_view),
- sampler: ManuallyDrop::new(sampler),
- row_size,
- height: img_data.height(),
- width: img_data.width(),
- },
- ))
+ img_data.copy_into(mapped_memory, sampled_image.row_size() as usize);
+
+ staging_buffer.unmap(device)?;
+
+ Ok((staging_buffer, sampled_image))
+}
+
+/// Errors that can be encountered when loading a texture.
+#[derive(Error, Debug)]
+pub enum TextureLoadError {
+ #[error("No available resources")]
+ NoResources,
}
diff --git a/stockton-skeleton/src/texture/loader.rs b/stockton-skeleton/src/texture/loader.rs
index 5c85fd3..7f630ab 100644
--- a/stockton-skeleton/src/texture/loader.rs
+++ b/stockton-skeleton/src/texture/loader.rs
@@ -1,13 +1,23 @@
//! Manages the loading/unloading of textures
use super::{
- block::{LoadedImage, TexturesBlock},
- load::{load_image, QueuedLoad, TextureLoadConfig, TextureLoadError, LAYERS, RESOURCES},
+ block::TexturesBlock,
+ load::{
+ load_image, QueuedLoad, TextureLoadConfig, TextureLoadError, FORMAT, LAYERS, RESOURCES,
+ },
repo::BLOCK_SIZE,
resolver::TextureResolver,
PIXEL_SIZE,
};
-use crate::{error::LockPoisoned, types::*, utils::find_memory_type_id};
+use crate::{
+ buffers::image::SampledImage,
+ context::RenderingContext,
+ error::{EnvironmentError, LockPoisoned},
+ mem::{MappableBlock, MemoryPool},
+ queue_negotiator::QueueFamilySelector,
+ types::*,
+ utils::get_pixel_size,
+};
use std::{
array::IntoIter,
@@ -26,18 +36,14 @@ use anyhow::{Context, Result};
use arrayvec::ArrayVec;
use hal::{
command::{BufferImageCopy, CommandBufferFlags},
- format::{Aspects, Format},
+ format::Aspects,
image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange},
- memory::{Barrier, Dependencies, Properties as MemProps, SparseFlags},
+ memory::{Barrier, Dependencies},
pso::{Descriptor, DescriptorSetWrite, ImageDescriptorType, PipelineStage, ShaderStageFlags},
- queue::family::QueueFamilyId,
- MemoryTypeId,
};
use image::{Rgba, RgbaImage};
use log::*;
use rendy_descriptor::{DescriptorRanges, DescriptorSetLayoutBinding, DescriptorType};
-use rendy_memory::DynamicConfig;
-use thiserror::Error;
/// The number of command buffers to have in flight simultaneously.
pub const NUM_SIMULTANEOUS_CMDS: usize = 2;
@@ -47,9 +53,15 @@ pub type BlockRef = usize;
/// Manages the loading/unloading of textures
/// This is expected to load the textures, then send the loaded blocks back
-pub struct TextureLoader<R: TextureResolver> {
+pub struct TextureLoader<R, TP, SP>
+where
+ R: TextureResolver,
+ TP: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
/// Blocks for which commands have been queued and are done loading once the fence is triggered.
- commands_queued: ArrayVec<[QueuedLoad<DynamicBlock>; NUM_SIMULTANEOUS_CMDS]>,
+ commands_queued: ArrayVec<[QueuedLoad<TP, SP>; NUM_SIMULTANEOUS_CMDS]>,
/// The command buffers used and a fence to go with them
buffers: VecDeque<(FenceT, CommandBufferT)>,
@@ -64,21 +76,18 @@ pub struct TextureLoader<R: TextureResolver> {
queue: Arc<RwLock<QueueT>>,
/// The memory allocator being used for textures
- tex_allocator: ManuallyDrop<DynamicAllocator>,
+ tex_mempool: Arc<RwLock<TP>>,
/// The memory allocator for staging memory
- staging_allocator: ManuallyDrop<DynamicAllocator>,
+ staging_mempool: Arc<RwLock<SP>>,
/// Allocator for descriptor sets
descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
- /// Type ID for staging memory
- staging_memory_type: MemoryTypeId,
-
/// From adapter, used for determining alignment
- optimal_buffer_copy_pitch_alignment: hal::buffer::Offset,
+ optimal_buffer_copy_pitch_alignment: u32,
/// Configuration for how to find and load textures
config: TextureLoadConfig<R>,
@@ -88,19 +97,20 @@ pub struct TextureLoader<R: TextureResolver> {
request_channel: Receiver<LoaderRequest>,
/// The channel blocks are returned to.
- return_channel: Sender<TexturesBlock<DynamicBlock>>,
+ return_channel: Sender<TexturesBlock<TP>>,
/// A filler image for descriptors that aren't needed but still need to be written to
- blank_image: ManuallyDrop<LoadedImage<DynamicBlock>>,
-}
-
-#[derive(Error, Debug)]
-pub enum TextureLoaderError {
- #[error("Couldn't find a suitable memory type")]
- NoMemoryTypes,
+ blank_image: ManuallyDrop<SampledImage<TP>>,
}
-impl<R: TextureResolver> TextureLoader<R> {
+impl<R, TP, SP> TextureLoader<R, TP, SP>
+where
+ R: TextureResolver,
+ TP: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
+    /// Keep loading textures until asked to stop. This should be called from a separate thread.
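+    ///
+    /// A sketch of spawning it (as `TextureRepo` does internally; names assumed):
+    /// ```ignore
+    /// let joiner = std::thread::spawn(move || loader.loop_until_exit());
+    /// ```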
pub fn loop_until_exit(mut self) -> Result<TextureLoaderRemains> {
debug!("TextureLoader starting main loop");
let mut res = Ok(false);
@@ -123,12 +133,15 @@ impl<R: TextureResolver> TextureLoader<R> {
_ => unreachable!(),
}
}
+
fn main(&mut self) -> Result<bool> {
+ // Get a device lock so we can check fence status
let mut device = self
.device
.write()
.map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
+
// Check for blocks that are finished, then send them back
let mut i = 0;
while i < self.commands_queued.len() {
@@ -139,12 +152,21 @@ impl<R: TextureResolver> TextureLoader<R> {
let (assets, mut staging_bufs, block) = self.commands_queued.remove(i).dissolve();
debug!("Load finished for texture block {:?}", block.id);
+ // Lock staging memory pool
+ let mut staging_mempool = self
+ .staging_mempool
+ .write()
+ .map_err(|_| LockPoisoned::MemoryPool)?;
+
// Destroy staging buffers
for buf in staging_bufs.drain(..) {
- buf.deactivate(&mut device, &mut self.staging_allocator);
+ buf.deactivate_device_pool(&mut device, &mut *staging_mempool);
}
+ // Return assets used for loading
self.buffers.push_back(assets);
+
+ // Send back our loaded block
self.return_channel
.send(block)
.context("Error returning texture block")?;
@@ -153,6 +175,7 @@ impl<R: TextureResolver> TextureLoader<R> {
}
}
+ // Release device lock
drop(device);
// Check for messages to start loading blocks
@@ -181,97 +204,42 @@ impl<R: TextureResolver> TextureLoader<R> {
Ok(false)
}
- pub fn new(
- adapter: &Adapter,
- device_lock: Arc<RwLock<DeviceT>>,
- (family, queue_lock): (QueueFamilyId, Arc<RwLock<QueueT>>),
+ /// Create a new loader from the given context.
+ pub fn new<Q: QueueFamilySelector>(
+ context: &mut RenderingContext,
ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
- (request_channel, return_channel): (
- Receiver<LoaderRequest>,
- Sender<TexturesBlock<DynamicBlock>>,
- ),
+ (request_channel, return_channel): (Receiver<LoaderRequest>, Sender<TexturesBlock<TP>>),
config: TextureLoadConfig<R>,
) -> Result<Self> {
+ // Queue family & Lock
+ let family = context
+ .queue_negotiator_mut()
+ .family::<Q>()
+ .ok_or(EnvironmentError::NoSuitableFamilies)?;
+ let queue_lock = context
+ .queue_negotiator_mut()
+ .get_queue::<Q>()
+ .ok_or(EnvironmentError::NoQueues)?;
+
+ // Memory pools
+ let tex_mempool = context.memory_pool()?.clone();
+ let staging_mempool = context.memory_pool()?.clone();
+
+ // Lock device
+ let device_lock = context.device().clone();
let mut device = device_lock
.write()
.map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
- let device_props = adapter.physical_device.properties();
-
- let type_mask = unsafe {
- use hal::image::{Kind, Tiling, Usage, ViewCapabilities};
-
- // We create an empty image with the same format as used for textures
- // this is to get the type_mask required, which will stay the same for
- // all colour images of the same tiling. (certain memory flags excluded).
-
- // Size and alignment don't necessarily stay the same, so we're forced to
- // guess at the alignment for our allocator.
-
- // TODO: Way to tune these options
- let img = device
- .create_image(
- Kind::D2(16, 16, 1, 1),
- 1,
- Format::Rgba8Srgb,
- Tiling::Optimal,
- Usage::SAMPLED,
- SparseFlags::empty(),
- ViewCapabilities::empty(),
- )
- .context("Error creating test image to get buffer settings")?;
-
- let type_mask = device.get_image_requirements(&img).type_mask;
-
- device.destroy_image(img);
- type_mask
- };
-
- debug!("Using type mask {:?}", type_mask);
-
- // Tex Allocator
- let mut tex_allocator = {
- let props = MemProps::DEVICE_LOCAL;
-
- DynamicAllocator::new(
- find_memory_type_id(adapter, type_mask, props)
- .ok_or(TextureLoaderError::NoMemoryTypes)
- .context("Couldn't create tex memory allocator")?,
- props,
- DynamicConfig {
- block_size_granularity: 4 * 32 * 32, // 32x32 image
- max_chunk_size: u64::pow(2, 63),
- min_device_allocation: 4 * 32 * 32,
- },
- device_props.limits.non_coherent_atom_size as u64,
- )
- };
-
- let (staging_memory_type, mut staging_allocator) = {
- let props = MemProps::CPU_VISIBLE | MemProps::COHERENT;
- let t = find_memory_type_id(adapter, u32::MAX, props)
- .ok_or(TextureLoaderError::NoMemoryTypes)
- .context("Couldn't create staging memory allocator")?;
- (
- t,
- DynamicAllocator::new(
- t,
- props,
- DynamicConfig {
- block_size_granularity: 4 * 32 * 32, // 32x32 image
- max_chunk_size: u64::pow(2, 63),
- min_device_allocation: 4 * 32 * 32,
- },
- device_props.limits.non_coherent_atom_size as u64,
- ),
- )
- };
+ // Physical properties
+ let device_props = context.physical_device_properties();
+ let optimal_buffer_copy_pitch_alignment =
+ device_props.limits.optimal_buffer_copy_pitch_alignment as u32;
// Pool
let mut pool = unsafe {
use hal::pool::CommandPoolCreateFlags;
-
device.create_command_pool(family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
}
.context("Error creating command pool")?;
@@ -293,16 +261,13 @@ impl<R: TextureResolver> TextureLoader<R> {
data
};
- let optimal_buffer_copy_pitch_alignment =
- device_props.limits.optimal_buffer_copy_pitch_alignment;
-
+ // Blank image (for padding descriptors)
let blank_image = unsafe {
Self::get_blank_image(
&mut device,
&mut buffers[0].1,
&queue_lock,
- (&mut staging_allocator, &mut tex_allocator),
- staging_memory_type,
+ (&staging_mempool, &tex_mempool),
optimal_buffer_copy_pitch_alignment,
&config,
)
@@ -319,11 +284,10 @@ impl<R: TextureResolver> TextureLoader<R> {
queue: queue_lock,
ds_layout,
- tex_allocator: ManuallyDrop::new(tex_allocator),
- staging_allocator: ManuallyDrop::new(staging_allocator),
+ tex_mempool,
+ staging_mempool,
descriptor_allocator: ManuallyDrop::new(DescriptorAllocator::new()),
- staging_memory_type,
optimal_buffer_copy_pitch_alignment,
request_channel,
@@ -333,7 +297,7 @@ impl<R: TextureResolver> TextureLoader<R> {
})
}
- unsafe fn attempt_queue_load(&mut self, block_ref: usize) -> Result<QueuedLoad<DynamicBlock>> {
+ unsafe fn attempt_queue_load(&mut self, block_ref: usize) -> Result<QueuedLoad<TP, SP>> {
let mut device = self
.device
.write()
@@ -403,7 +367,7 @@ impl<R: TextureResolver> TextureLoader<R> {
binding: 0,
array_offset: tex_idx % BLOCK_SIZE,
descriptors: once(Descriptor::Image(
- &*self.blank_image.img_view,
+ &*self.blank_image.img_view(),
Layout::ShaderReadOnlyOptimal,
)),
});
@@ -411,7 +375,7 @@ impl<R: TextureResolver> TextureLoader<R> {
set: descriptor_set.raw_mut(),
binding: 1,
array_offset: tex_idx % BLOCK_SIZE,
- descriptors: once(Descriptor::Sampler(&*self.blank_image.sampler)),
+ descriptors: once(Descriptor::Sampler(&*self.blank_image.sampler())),
});
continue;
@@ -423,9 +387,8 @@ impl<R: TextureResolver> TextureLoader<R> {
let (staging_buffer, img) = load_image(
&mut device,
- &mut self.staging_allocator,
- &mut self.tex_allocator,
- self.staging_memory_type,
+ &mut self.staging_mempool,
+ &mut self.tex_mempool,
self.optimal_buffer_copy_pitch_alignment,
img_data,
&self.config,
@@ -438,7 +401,7 @@ impl<R: TextureResolver> TextureLoader<R> {
binding: 0,
array_offset,
descriptors: once(Descriptor::Image(
- &*img.img_view,
+ img.img_view(),
Layout::ShaderReadOnlyOptimal,
)),
});
@@ -446,7 +409,7 @@ impl<R: TextureResolver> TextureLoader<R> {
set: descriptor_set.raw_mut(),
binding: 1,
array_offset,
- descriptors: once(Descriptor::Sampler(&*img.sampler)),
+ descriptors: once(Descriptor::Sampler(img.sampler())),
});
}
@@ -462,7 +425,7 @@ impl<R: TextureResolver> TextureLoader<R> {
imgs.iter().map(|li| Barrier::Image {
states: (Access::empty(), Layout::Undefined)
..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
- target: &*li.img,
+ target: &*li.img(),
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
@@ -477,13 +440,13 @@ impl<R: TextureResolver> TextureLoader<R> {
// Record copy commands
for (li, sb) in imgs.iter().zip(staging_bufs.iter()) {
buf.copy_buffer_to_image(
- &*sb.buf,
- &*li.img,
+ &*sb.buf(),
+ &*li.img(),
Layout::TransferDstOptimal,
once(BufferImageCopy {
buffer_offset: 0,
- buffer_width: (li.row_size / super::PIXEL_SIZE) as u32,
- buffer_height: li.height,
+ buffer_width: (li.row_size() / get_pixel_size(FORMAT)) as u32,
+ buffer_height: li.height(),
image_layers: SubresourceLayers {
aspects: Aspects::COLOR,
level: 0,
@@ -491,8 +454,8 @@ impl<R: TextureResolver> TextureLoader<R> {
},
image_offset: Offset { x: 0, y: 0, z: 0 },
image_extent: gfx_hal::image::Extent {
- width: li.width,
- height: li.height,
+ width: li.unpadded_row_size(),
+ height: li.height(),
depth: 1,
},
}),
@@ -504,7 +467,7 @@ impl<R: TextureResolver> TextureLoader<R> {
imgs.iter().map(|li| Barrier::Image {
states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
..(Access::empty(), Layout::ShaderReadOnlyOptimal),
- target: &*li.img,
+ target: &*li.img(),
families: None,
range: RESOURCES,
}),
@@ -535,11 +498,10 @@ impl<R: TextureResolver> TextureLoader<R> {
device: &mut DeviceT,
buf: &mut CommandBufferT,
queue_lock: &Arc<RwLock<QueueT>>,
- (staging_allocator, tex_allocator): (&mut DynamicAllocator, &mut DynamicAllocator),
- staging_memory_type: MemoryTypeId,
- obcpa: u64,
+ (staging_mempool, tex_mempool): (&Arc<RwLock<SP>>, &Arc<RwLock<TP>>),
+ obcpa: u32,
config: &TextureLoadConfig<R>,
- ) -> Result<LoadedImage<DynamicBlock>> {
+ ) -> Result<SampledImage<TP>> {
let img_data = RgbaImage::from_pixel(1, 1, Rgba([255, 0, 255, 255]));
let height = img_data.height();
@@ -551,9 +513,8 @@ impl<R: TextureResolver> TextureLoader<R> {
let (staging_buffer, img) = load_image(
device,
- staging_allocator,
- tex_allocator,
- staging_memory_type,
+ staging_mempool,
+ tex_mempool,
obcpa,
img_data,
config,
@@ -567,7 +528,7 @@ impl<R: TextureResolver> TextureLoader<R> {
once(Barrier::Image {
states: (Access::empty(), Layout::Undefined)
..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
- target: &*img.img,
+ target: &*img.img(),
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
@@ -579,8 +540,8 @@ impl<R: TextureResolver> TextureLoader<R> {
}),
);
buf.copy_buffer_to_image(
- &*staging_buffer.buf,
- &*img.img,
+ &*staging_buffer.buf(),
+ &*img.img(),
Layout::TransferDstOptimal,
once(BufferImageCopy {
buffer_offset: 0,
@@ -602,7 +563,7 @@ impl<R: TextureResolver> TextureLoader<R> {
once(Barrier::Image {
states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
..(Access::empty(), Layout::ShaderReadOnlyOptimal),
- target: &*img.img,
+ target: &*img.img(),
families: None,
range: RESOURCES,
}),
@@ -628,7 +589,10 @@ impl<R: TextureResolver> TextureLoader<R> {
device.destroy_fence(fence);
- staging_buffer.deactivate(device, staging_allocator);
+ {
+ let mut staging_mempool = staging_mempool.write().unwrap();
+ staging_buffer.deactivate_device_pool(device, &mut *staging_mempool);
+ }
Ok(img)
}
@@ -658,8 +622,9 @@ impl<R: TextureResolver> TextureLoader<R> {
device.destroy_fence(assets.0);
// Command buffer will be freed when we reset the command pool
// Destroy staging buffers
+ let mut staging_mempool = self.staging_mempool.write().unwrap();
for buf in staging_bufs.drain(..) {
- buf.deactivate(&mut device, &mut self.staging_allocator);
+ buf.deactivate_device_pool(&mut device, &mut staging_mempool);
}
self.return_channel
@@ -674,7 +639,11 @@ impl<R: TextureResolver> TextureLoader<R> {
}
// Destroy blank image
- read(&*self.blank_image).deactivate(&mut device, &mut *self.tex_allocator);
+ {
+ let mut tex_mempool = self.tex_mempool.write().unwrap();
+ read(&*self.blank_image)
+ .deactivate_with_device_pool(&mut device, &mut *tex_mempool);
+ }
// Destroy fences
@@ -690,7 +659,6 @@ impl<R: TextureResolver> TextureLoader<R> {
debug!("Done deactivating TextureLoader");
TextureLoaderRemains {
- tex_allocator: ManuallyDrop::new(read(&*self.tex_allocator)),
descriptor_allocator: ManuallyDrop::new(read(&*self.descriptor_allocator)),
}
}
@@ -698,7 +666,6 @@ impl<R: TextureResolver> TextureLoader<R> {
}
pub struct TextureLoaderRemains {
- pub tex_allocator: ManuallyDrop<DynamicAllocator>,
pub descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
}
diff --git a/stockton-skeleton/src/texture/mod.rs b/stockton-skeleton/src/texture/mod.rs
index aef1b03..10fbbad 100644
--- a/stockton-skeleton/src/texture/mod.rs
+++ b/stockton-skeleton/src/texture/mod.rs
@@ -6,7 +6,6 @@ mod load;
mod loader;
mod repo;
pub mod resolver;
-mod staging_buffer;
pub use self::block::TexturesBlock;
pub use self::image::LoadableImage;
diff --git a/stockton-skeleton/src/texture/repo.rs b/stockton-skeleton/src/texture/repo.rs
index 341d355..635eebb 100644
--- a/stockton-skeleton/src/texture/repo.rs
+++ b/stockton-skeleton/src/texture/repo.rs
@@ -4,14 +4,15 @@ use super::{
loader::{BlockRef, LoaderRequest, TextureLoader, TextureLoaderRemains, NUM_SIMULTANEOUS_CMDS},
resolver::TextureResolver,
};
-use crate::error::LockPoisoned;
-use crate::queue_negotiator::QueueFamilySelector;
use crate::types::*;
+use crate::{context::RenderingContext, error::LockPoisoned, mem::MappableBlock};
+use crate::{mem::MemoryPool, queue_negotiator::QueueFamilySelector};
use std::{
array::IntoIter,
collections::HashMap,
iter::empty,
+ marker::PhantomData,
mem::ManuallyDrop,
sync::{
mpsc::{channel, Receiver, Sender},
@@ -21,10 +22,7 @@ use std::{
};
use anyhow::{Context, Result};
-use hal::{
- pso::{DescriptorSetLayoutBinding, DescriptorType, ImageDescriptorType, ShaderStageFlags},
- queue::family::QueueFamilyId,
-};
+use hal::pso::{DescriptorSetLayoutBinding, DescriptorType, ImageDescriptorType, ShaderStageFlags};
use log::debug;
/// The number of textures in one 'block'
@@ -32,26 +30,46 @@ use log::debug;
 /// Whenever a texture is needed, the whole block it's in is loaded.
pub const BLOCK_SIZE: usize = 8;
-pub struct TextureRepo {
+/// An easy way to load [`super::LoadableImage`]s into GPU memory using another thread.
+/// This assumes each texture has a numeric id, and groups them into blocks of [`BLOCK_SIZE`],
+/// yielding descriptor sets with that many samplers and images.
+/// You only need to supply a [`super::resolver::TextureResolver`] and create one from the main thread.
+/// Use [`get_ds_layout`] when building your graphics pipeline, and make sure to call
+/// [`process_responses`] every frame. Whenever you draw, use [`attempt_get_descriptor_set`]
+/// to see if a texture has finished loading, or [`queue_load`] to start loading it as soon as possible.
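+///
+/// A sketch of per-frame usage (names assumed):
+/// ```ignore
+/// repo.process_responses();
+/// repo.queue_load(block_id)?; // no-op if already loaded or loading
+/// if let Some(ds) = repo.attempt_get_descriptor_set(block_id) {
+///     // bind `ds` and record draw calls
+/// }
+/// ```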
+pub struct TextureRepo<TP, SP>
+where
+ TP: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
joiner: ManuallyDrop<JoinHandle<Result<TextureLoaderRemains>>>,
ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
req_send: Sender<LoaderRequest>,
- resp_recv: Receiver<TexturesBlock<DynamicBlock>>,
- blocks: HashMap<BlockRef, Option<TexturesBlock<DynamicBlock>>>,
+ resp_recv: Receiver<TexturesBlock<TP>>,
+ blocks: HashMap<BlockRef, Option<TexturesBlock<TP>>>,
+ _d: PhantomData<(TP, SP)>,
}
-impl TextureRepo {
- pub fn new<R: 'static + TextureResolver + Send + Sync>(
- device_lock: Arc<RwLock<DeviceT>>,
- family: QueueFamilyId,
- queue: Arc<RwLock<QueueT>>,
- adapter: &Adapter,
+impl<TP, SP> TextureRepo<TP, SP>
+where
+ TP: MemoryPool,
+ SP: MemoryPool,
+ SP::Block: MappableBlock,
+{
+ /// Create a new TextureRepo from the given context.
+    /// `Q` should most likely be [`TexLoadQueue`].
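+    ///
+    /// A sketch of construction (pool and resolver types assumed):
+    /// ```ignore
+    /// let repo = TextureRepo::<TexPool, StagingPool>::new::<_, TexLoadQueue>(
+    ///     &mut context,
+    ///     config, // TextureLoadConfig<MyResolver>
+    /// )?;
+    /// ```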
+ pub fn new<R: 'static + TextureResolver + Send + Sync, Q: QueueFamilySelector>(
+ context: &mut RenderingContext,
config: TextureLoadConfig<R>,
) -> Result<Self> {
// Create Channels
let (req_send, req_recv) = channel();
let (resp_send, resp_recv) = channel();
- let device = device_lock
+ let device = context
+ .device()
.write()
.map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
@@ -91,10 +109,8 @@ impl TextureRepo {
drop(device);
let joiner = {
- let loader = TextureLoader::new(
- adapter,
- device_lock.clone(),
- (family, queue),
+ let loader = <TextureLoader<_, TP, SP>>::new::<Q>(
+ context,
ds_lock.clone(),
(req_recv, resp_send),
config,
@@ -109,9 +125,12 @@ impl TextureRepo {
blocks: HashMap::new(),
req_send,
resp_recv,
+ _d: PhantomData,
})
}
+    /// Get the descriptor set layout used for each block of textures.
+    /// This can be used when creating graphics pipelines.
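+    ///
+    /// ```ignore
+    /// let ds_layout = repo.get_ds_layout()?;
+    /// // pass &*ds_layout to your pipeline builder, then drop the read guard
+    /// ```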
pub fn get_ds_layout(&self) -> Result<RwLockReadGuard<DescriptorSetLayoutT>> {
self.ds_layout
.read()
@@ -119,6 +138,7 @@ impl TextureRepo {
.context("Error locking descriptor set layout")
}
+ /// Ask for the given block to be loaded, if it's not already.
pub fn queue_load(&mut self, block_id: BlockRef) -> Result<()> {
if self.blocks.contains_key(&block_id) {
return Ok(());
@@ -127,6 +147,7 @@ impl TextureRepo {
self.force_queue_load(block_id)
}
+ /// Ask for the given block to be loaded, even if it already has been.
pub fn force_queue_load(&mut self, block_id: BlockRef) -> Result<()> {
self.req_send
.send(LoaderRequest::Load(block_id))
@@ -137,12 +158,14 @@ impl TextureRepo {
Ok(())
}
+ /// Get the descriptor set for the given block, if it's loaded.
pub fn attempt_get_descriptor_set(&mut self, block_id: BlockRef) -> Option<&DescriptorSetT> {
self.blocks
.get(&block_id)
.and_then(|opt| opt.as_ref().map(|z| z.descriptor_set.raw()))
}
+ /// Process any textures that just finished loading. This should be called every frame.
pub fn process_responses(&mut self) {
let resp_iter: Vec<_> = self.resp_recv.try_iter().collect();
for resp in resp_iter {
@@ -151,7 +174,8 @@ impl TextureRepo {
}
}
- pub fn deactivate(mut self, device_lock: &Arc<RwLock<DeviceT>>) {
+    /// Destroy all Vulkan objects. Should be called before dropping.
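+    ///
+    /// ```ignore
+    /// repo.deactivate(&mut context);
+    /// ```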
+ pub fn deactivate(mut self, context: &mut RenderingContext) {
unsafe {
use std::ptr::read;
@@ -162,22 +186,27 @@ impl TextureRepo {
// Process any ones that just got done loading
self.process_responses();
+ let mut tex_allocator = context
+ .existing_memory_pool::<TP>()
+ .unwrap()
+ .write()
+ .unwrap();
+
// Only now can we lock device without deadlocking
- let mut device = device_lock.write().unwrap();
+ let mut device = context.device().write().unwrap();
// Return all the texture memory and descriptors.
for (_, v) in self.blocks.drain() {
if let Some(block) = v {
block.deactivate(
&mut device,
- &mut *remains.tex_allocator,
+ &mut *tex_allocator,
&mut remains.descriptor_allocator,
);
}
}
- // Dispose of both allocators
- read(&*remains.tex_allocator).dispose();
+ // Dispose of the descriptor allocator
read(&*remains.descriptor_allocator).dispose(&device);
// Deactivate DS Layout
@@ -190,6 +219,7 @@ impl TextureRepo {
}
}
+/// The queue to use when loading textures
pub struct TexLoadQueue;
impl QueueFamilySelector for TexLoadQueue {
diff --git a/stockton-skeleton/src/texture/staging_buffer.rs b/stockton-skeleton/src/texture/staging_buffer.rs
deleted file mode 100644
index 8d2ae17..0000000
--- a/stockton-skeleton/src/texture/staging_buffer.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-#![allow(mutable_transmutes)]
-use crate::types::*;
-
-use std::mem::ManuallyDrop;
-
-use anyhow::{Context, Result};
-use hal::{device::MapError, memory::SparseFlags, MemoryTypeId};
-use rendy_memory::{Allocator, Block};
-
-pub struct StagingBuffer {
- pub buf: ManuallyDrop<BufferT>,
- pub mem: ManuallyDrop<DynamicBlock>,
-}
-
-impl StagingBuffer {
- const USAGE: hal::buffer::Usage = hal::buffer::Usage::TRANSFER_SRC;
-
- pub fn new(
- device: &mut DeviceT,
- alloc: &mut DynamicAllocator,
- size: u64,
- _memory_type_id: MemoryTypeId,
- ) -> Result<StagingBuffer> {
- let mut buffer = unsafe { device.create_buffer(size, Self::USAGE, SparseFlags::empty()) }
- .context("Error creating buffer")?;
-
- let requirements = unsafe { device.get_buffer_requirements(&buffer) };
-
- let (memory, _) = alloc
- .alloc(device, requirements.size, requirements.alignment)
- .context("Error allocating staging memory")?;
-
- unsafe { device.bind_buffer_memory(memory.memory(), 0, &mut buffer) }
- .context("Error binding staging memory to buffer")?;
-
- Ok(StagingBuffer {
- buf: ManuallyDrop::new(buffer),
- mem: ManuallyDrop::new(memory),
- })
- }
-
- pub unsafe fn map_memory(&mut self, device: &mut DeviceT) -> Result<*mut u8, MapError> {
- let range = 0..(self.mem.range().end - self.mem.range().start);
- Ok(self.mem.map(device, range)?.ptr().as_mut())
- }
- pub unsafe fn unmap_memory(&mut self, device: &mut DeviceT) {
- self.mem.unmap(device);
- }
-
- pub fn deactivate(self, device: &mut DeviceT, alloc: &mut DynamicAllocator) {
- unsafe {
- use std::ptr::read;
- // Destroy buffer
- device.destroy_buffer(read(&*self.buf));
- // Free memory
- alloc.free(device, read(&*self.mem));
- }
- }
-}
diff --git a/stockton-skeleton/src/utils.rs b/stockton-skeleton/src/utils.rs
index 152ba10..253ad7f 100644
--- a/stockton-skeleton/src/utils.rs
+++ b/stockton-skeleton/src/utils.rs
@@ -1,5 +1,5 @@
use crate::types::*;
-use hal::{memory::Properties as MemProperties, MemoryTypeId};
+use hal::{format::Format, memory::Properties as MemProperties, MemoryTypeId};
pub fn find_memory_type_id(
adapter: &Adapter,
@@ -17,3 +17,7 @@ pub fn find_memory_type_id(
})
.map(|(id, _)| MemoryTypeId(id))
}
+
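+/// Get the size of one pixel of `f`, in bytes.
+/// For example, `Format::Rgba8Srgb` is 32 bits per pixel, so this returns 4.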
+pub fn get_pixel_size(f: Format) -> u32 {
+ f.surface_desc().bits as u32 / 8
+}