author    | tcmal <me@aria.rip> | 2024-08-25 17:44:20 +0100
committer | tcmal <me@aria.rip> | 2024-08-25 17:44:20 +0100
commit    | 9806e1d2552b944e809d4f545e5d8bdb6827c144 (patch)
tree      | e2b7273c70b3758079817fcef916a15e72c5425f /stockton-render/src/draw
parent    | c3683cb91a7142be405aa672fcbae4238a3bde72 (diff)
fix(render): fix some validation warnings
Diffstat (limited to 'stockton-render/src/draw')
-rw-r--r-- | stockton-render/src/draw/buffer.rs  | 189
-rw-r--r-- | stockton-render/src/draw/context.rs |   2
-rw-r--r-- | stockton-render/src/draw/texture.rs |  16
3 files changed, 20 insertions, 187 deletions
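
The substantive fix is in buffer.rs: CPU writes to the mapped staging memory are now flushed before the staged-to-device copy is recorded, which addresses the validation warning about the GPU reading from non-coherent mapped memory that was never flushed, and the copy size is derived from the highest element actually written rather than the full mapped slice. A minimal sketch of the flush pattern, assuming the same gfx-hal `Device` and `Segment` APIs the diff uses (the free function and its parameters are illustrative, not code from this repository):

    use std::iter::once;
    use hal::device::Device;
    use hal::memory::Segment;

    // Sketch: flush CPU writes to mapped (possibly non-coherent) memory
    // before the GPU copies out of it. `staged_memory` is a stand-in for
    // StagedBuffer's mapped allocation.
    unsafe fn flush_staged<B: hal::Backend>(device: &B::Device, staged_memory: &B::Memory) {
        // Segment::ALL covers the whole allocation; a Segment with an
        // explicit offset/size would flush only the written range.
        device
            .flush_mapped_memory_ranges(once((staged_memory, Segment::ALL)))
            .unwrap();
    }

The same pattern appears at the bottom of the texture.rs diff, where LoadedImage flushes its staging memory before unmapping it.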
diff --git a/stockton-render/src/draw/buffer.rs b/stockton-render/src/draw/buffer.rs
index a603cd6..7bbd7e3 100644
--- a/stockton-render/src/draw/buffer.rs
+++ b/stockton-render/src/draw/buffer.rs
@@ -120,8 +120,14 @@ impl <'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
         command_queue: &mut CommandQueue,
         command_pool: &mut CommandPool) -> &'b Buffer {
         if self.staged_is_dirty {
-            // Copy from staged to buffer
+            // Flush mapped memory to ensure the staged buffer is filled
+            unsafe {
+                use std::ops::Deref;
+                device.flush_mapped_memory_ranges(once((self.staged_memory.deref(), Segment::ALL))).unwrap();
+            }
+
+            // Copy from staged to buffer
             let buf = unsafe {
                 use hal::command::{CommandBufferFlags, BufferCopy};
                 // Get a command buffer
@@ -133,7 +139,7 @@ impl <'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
                 BufferCopy {
                     src: 0,
                     dst: 0,
-                    size: (self.staged_mapped_memory.len() * size_of::<T>()) as u64
+                    size: ((self.highest_used + 1) * size_of::<T>()) as u64
                 }
             ]);
             buf.finish();
@@ -183,181 +189,4 @@ impl<'a, T: Sized> IndexMut<usize> for StagedBuffer<'a, T> {
         }
         &mut self.staged_mapped_memory[index]
     }
-}
-
-// trait VertexLump {
-// pub fn new(device: &mut Device, adapter: &Adapter<back::Backend>) -> Result<Self, CreationError> {
-// }
-
-// pub(crate) struct VertexLump<T: Into<X>, X: Pod> {
-//     pub (crate) buffer: ManuallyDrop<Buffer>,
-//     memory: ManuallyDrop<Memory>,
-//     requirements: Requirements,
-
-//     unit_size_bytes: u64,
-//     unit_size_verts: u64,
-//     batch_size: u64,
-
-//     num_batches: usize,
-
-
-//     /// An instance is active if it has been assigned to
-//     pub active_instances: Range<InstanceCount>,
-//     pub active_verts: Range<VertexCount>,
-
-//     active: bool,
-
-//     _t: PhantomData<T>,
-//     _x: PhantomData<X>
-// }
-
-// const BATCH_SIZE: u64 = 3;
-
-// impl<T: Into<X>, X: Pod> VertexLump<T, X> {
-//     pub fn new(device: &mut Device, adapter: &Adapter<back::Backend>) -> Result<VertexLump<T, X>, CreationError> {
-//         let unit_size_bytes = size_of::<X>() as u64;
-//         let unit_size_verts = unit_size_bytes / size_of::<f32>() as u64;
-
-//         let mut buffer = unsafe { device
-//             .create_buffer(BATCH_SIZE * unit_size_bytes, Usage::VERTEX) }
-
-//             .map_err(|e| CreationError::BufferError (e))?;
-
-//         let requirements = unsafe { device.get_buffer_requirements(&buffer) };
-//         let memory_type_id = adapter.physical_device
-//             .memory_properties().memory_types
-//             .iter().enumerate()
-//             .find(|&(id, memory_type)| {
-//                 requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
-//             })
-//             .map(|(id, _)| MemoryTypeId(id))
-//             .ok_or(CreationError::BufferNoMemory)?;
-
-//         let memory = unsafe { device
-//             .allocate_memory(memory_type_id, requirements.size) }
-//             .map_err(|_| CreationError::OutOfMemoryError)?;
-
-//         unsafe { device
-//             .bind_buffer_memory(&memory, 0, &mut buffer) }
-//             .map_err(|_| CreationError::BufferNoMemory)?;
-
-//         Ok(VertexLump {
-//             buffer: ManuallyDrop::new(buffer),
-//             memory: ManuallyDrop::new(memory),
-//             requirements,
-//             active_verts: 0..0,
-//             active_instances: 0..0,
-//             num_batches: 1,
-//             unit_size_bytes,
-//             unit_size_verts,
-//             batch_size: BATCH_SIZE, // TODO
-//             active: true,
-//             _t: PhantomData,
-//             _x: PhantomData
-//         })
-//     }
-
-//     pub fn set_active_instances(&mut self, range: Range<InstanceCount>) {
-//         let count: u64 = (range.end - range.start).into();
-//         let size_verts: u32 = (count * self.unit_size_verts).try_into().unwrap();
-//         self.active_verts = range.start * size_verts..range.end * size_verts;
-//         self.active_instances = range;
-//     }
-
-//     pub fn add(&mut self, tri: T, ctx: &mut RenderingContext) -> Result<(), ()> {
-
-//         // figure out where to put it
-//         let idx: usize = (self.active_instances.end).try_into().unwrap();
-//         let batch_size: usize = self.batch_size.try_into().unwrap();
-//         let max_size: usize = self.num_batches * batch_size;
-
-//         // make sure correct size
-//         if idx >= max_size {
-//             self.num_batches += 1;
-
-//             debug!("Reallocating Vertex buffer to {} batches ({} instances)", self.num_batches, self.num_batches as u64 * self.batch_size);
-//             // get new buffer
-//             let (new_buffer, new_requirements, new_memory) = {
-//                 let mut buffer = ManuallyDrop::new(unsafe { ctx.device
-//                     .create_buffer(self.batch_size * self.unit_size_bytes * self.num_batches as u64, Usage::VERTEX) }
-//                     .map_err(|_| ())?
-//                 );
-//                 let requirements = unsafe { ctx.device.get_buffer_requirements(&buffer) };
-
-//                 let memory_type_id = ctx.adapter.physical_device
-//                     .memory_properties().memory_types
-//                     .iter().enumerate()
-//                     .find(|&(id, memory_type)| {
-//                         requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
-//                     })
-//                     .map(|(id, _)| MemoryTypeId(id))
-//                     .ok_or(())?;
-
-//                 let memory = ManuallyDrop::new(unsafe { ctx.device
-//                     .allocate_memory(memory_type_id, requirements.size) }
-//                     .map_err(|_| ())?);
-
-//                 unsafe { ctx.device
-//                     .bind_buffer_memory(&memory, 0, &mut buffer) }
-//                     .map_err(|_| ())?;
-
-//                 (buffer, requirements, memory)
-//             };
-
-//             // copy vertices
-//             unsafe {
-//                 let copy_range = 0..self.requirements.size;
-
-//                 trace!("Copying {:?} from old buffer to new buffer", copy_range);
-
-//                 let reader = ctx.device.acquire_mapping_reader::<u8>(&*(self.memory), copy_range.clone())
-//                     .map_err(|_| ())?;
-//                 let mut writer = ctx.device.acquire_mapping_writer::<u8>(&new_memory, copy_range.clone())
-//                     .map_err(|_| ())?;
-
-//                 let copy_range: Range<usize> = 0..self.requirements.size.try_into().unwrap();
-//                 writer[copy_range.clone()].copy_from_slice(&reader[copy_range.clone()]);
-
-//                 ctx.device.release_mapping_reader(reader);
-//                 ctx.device.release_mapping_writer(writer).map_err(|_| ())?;
-//             };
-
-//             // destroy old buffer
-//             self.deactivate(ctx);
-
-//             // use new one
-//             self.buffer = new_buffer;
-//             self.requirements = new_requirements;
-//             self.memory = new_memory;
-//             self.active = true;
-
-//         }
-
-//         {
-//             // acquire writer
-//             let mut writer = self.writer(ctx)?;
-
-//             // write to it
-//             writer[idx] = tri.into();
-//         }
-
-//         // activate new triangle
-//         let new_range = self.active_instances.start..self.active_instances.end + 1;
-//         self.set_active_instances(new_range);
-
-//         Ok(())
-//     }
-
-//     pub(crate) fn writer<'a>(&'a mut self, ctx: &'a mut RenderingContext) -> Result<VertexWriter<'a, X>, ()> {
-//         let mapping_writer = unsafe { ctx.device
-//             .acquire_mapping_writer(&*(self.memory), 0..self.requirements.size)
-//             .map_err(|_| ())? };
-
-//         Ok(VertexWriter {
-//             mapping_writer: ManuallyDrop::new(mapping_writer),
-//             ctx
-//         })
-//     }
-
-// }
-
+}
\ No newline at end of file
diff --git a/stockton-render/src/draw/context.rs b/stockton-render/src/draw/context.rs
index f8a057c..c179f9d 100644
--- a/stockton-render/src/draw/context.rs
+++ b/stockton-render/src/draw/context.rs
@@ -774,6 +774,8 @@ impl<'a> core::ops::Drop for RenderingContext<'a> {
                 .destroy_render_pass(ManuallyDrop::into_inner(read(&self.renderpass)));
             self.device
                 .destroy_swapchain(ManuallyDrop::into_inner(read(&self.swapchain)));
+
+            self.device.destroy_graphics_pipeline(ManuallyDrop::into_inner(read(&self.pipeline)));
             self.device
                 .destroy_pipeline_layout(ManuallyDrop::into_inner(read(&self.pipeline_layout)));
diff --git a/stockton-render/src/draw/texture.rs b/stockton-render/src/draw/texture.rs
index 52d899f..19f60d9 100644
--- a/stockton-render/src/draw/texture.rs
+++ b/stockton-render/src/draw/texture.rs
@@ -49,7 +49,7 @@ const PIXEL_SIZE: usize = size_of::<image::Rgba<u8>>();
 /// Note that it's possible not all descriptors are actually initialised images
 pub struct TextureStore {
     descriptor_pool: ManuallyDrop<DescriptorPool>,
-    pub descriptor_set: ManuallyDrop<DescriptorSet>,
+    pub descriptor_set: DescriptorSet,
     pub descriptor_set_layout: ManuallyDrop<DescriptorSetLayout>,
     loaded_images: Vec<LoadedImage>,
     next_index: usize,
@@ -117,7 +117,7 @@ impl TextureStore {
         Ok(TextureStore {
             descriptor_pool: ManuallyDrop::new(descriptor_pool),
-            descriptor_set: ManuallyDrop::new(descriptor_set),
+            descriptor_set: descriptor_set,
             loaded_images: Vec::with_capacity(size),
             descriptor_set_layout: ManuallyDrop::new(descriptor_set_layout),
             next_index: 0,
@@ -172,7 +172,7 @@ impl TextureStore {
             device.write_descriptor_sets(vec![
                 DescriptorSetWrite {
-                    set: self.descriptor_set.deref(),
+                    set: &self.descriptor_set,
                     binding: 0,
                     array_offset: idx,
                     descriptors: Some(Descriptor::Image(
@@ -181,7 +181,7 @@ impl TextureStore {
                     )),
                 },
                 DescriptorSetWrite {
-                    set: self.descriptor_set.deref(),
+                    set: &self.descriptor_set,
                     binding: 1,
                     array_offset: idx,
                     descriptors: Some(Descriptor::Sampler(texture.sampler.deref())),
@@ -204,9 +204,11 @@ impl TextureStore {
         unsafe {
             use core::ptr::read;
 
-            self.loaded_images.drain(..).map(|img| img.deactivate(device)).collect();
+            for img in self.loaded_images.drain(..) {
+                img.deactivate(device)
+            }
 
-            self.descriptor_pool.free_sets(once(ManuallyDrop::into_inner(read(&self.descriptor_set))));
+            self.descriptor_pool.reset();
             device.destroy_descriptor_pool(ManuallyDrop::into_inner(read(&self.descriptor_pool)));
             device
                 .destroy_descriptor_set_layout(ManuallyDrop::into_inner(read(&self.descriptor_set_layout)));
@@ -251,7 +253,7 @@ impl LoadedImage {
                 copy_nonoverlapping(row.as_ptr(), mapped_memory.offset(dest_base), row.len());
             }
-
+            device.flush_mapped_memory_ranges(once((&staging_memory, Segment::ALL))).map_err(|_| "Couldn't write buffer memory")?;
             device.unmap_memory(&staging_memory);
         }
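
The context.rs hunk fixes a leak the validation layers warn about on shutdown: RenderingContext's Drop destroyed the pipeline layout but never the graphics pipeline itself. The ordering it establishes, sketched against gfx-hal's `Device` trait with illustrative parameter names (not the actual RenderingContext fields):

    use core::mem::ManuallyDrop;
    use core::ptr::read;
    use hal::device::Device;

    // Sketch of the Drop ordering: destroy the pipeline before the layout
    // it was created against. Mirrors the ManuallyDrop handling in the
    // diff; must only ever run once per handle.
    unsafe fn drop_pipeline<B: hal::Backend>(
        device: &B::Device,
        pipeline: &ManuallyDrop<B::GraphicsPipeline>,
        pipeline_layout: &ManuallyDrop<B::PipelineLayout>,
    ) {
        device.destroy_graphics_pipeline(ManuallyDrop::into_inner(read(pipeline)));
        device.destroy_pipeline_layout(ManuallyDrop::into_inner(read(pipeline_layout)));
    }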
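
In texture.rs, the descriptor set no longer needs ManuallyDrop because it is never freed individually: freeing single sets requires the pool to have been created with the FREE_DESCRIPTOR_SET flag (which this pool presumably was not, hence the warning), while resetting the pool reclaims every set at once with no flag required. A sketch of the resulting teardown, again assuming gfx-hal's traits and using illustrative names rather than TextureStore's real fields:

    use hal::device::Device;
    use hal::pso::DescriptorPool;

    // Sketch: reset() implicitly invalidates every descriptor set allocated
    // from the pool, so there is no per-set free call at all.
    unsafe fn teardown_descriptors<B: hal::Backend>(
        device: &B::Device,
        mut pool: B::DescriptorPool,
        layout: B::DescriptorSetLayout,
    ) {
        pool.reset();                            // all sets become invalid here
        device.destroy_descriptor_pool(pool);    // then the pool itself
        device.destroy_descriptor_set_layout(layout);
    }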