From 5360fe6a337145b2640ec1e5727c97fd9bbeacd9 Mon Sep 17 00:00:00 2001
From: tcmal
Date: Sun, 25 Aug 2024 17:44:24 +0100
Subject: refactor(all): small fixes

---
 stockton-skeleton/src/buffers/image.rs   |  4 ++--
 stockton-skeleton/src/buffers/staging.rs |  8 ++------
 stockton-skeleton/src/context.rs         |  3 ++-
 stockton-skeleton/src/mem.rs             |  9 +++++----
 stockton-skeleton/src/texture/load.rs    | 13 -------------
 stockton-skeleton/src/texture/loader.rs  | 16 +++++++++++-----
 6 files changed, 22 insertions(+), 31 deletions(-)

diff --git a/stockton-skeleton/src/buffers/image.rs b/stockton-skeleton/src/buffers/image.rs
index 34a0a37..4278585 100644
--- a/stockton-skeleton/src/buffers/image.rs
+++ b/stockton-skeleton/src/buffers/image.rs
@@ -116,7 +116,7 @@ impl BoundImageView
     {
         // Allocate memory
         let (mem, _) = pool
-            .alloc(&device, requirements.size, requirements.alignment)
+            .alloc(device, requirements.size, requirements.alignment)
             .context("Error allocating memory")?;
 
         // Bind memory
@@ -169,7 +169,7 @@ impl BoundImageView
     {
         unsafe {
             device.destroy_image_view(read(&*self.img_view));
             device.destroy_image(read(&*self.img));
-            pool.free(&device, read(&*self.mem));
+            pool.free(device, read(&*self.mem));
         }
     }
diff --git a/stockton-skeleton/src/buffers/staging.rs b/stockton-skeleton/src/buffers/staging.rs
index 5c80f51..44d0c2d 100644
--- a/stockton-skeleton/src/buffers/staging.rs
+++ b/stockton-skeleton/src/buffers/staging.rs
@@ -61,15 +61,11 @@ where
 
     /// Map the given range to CPU-visible memory, returning a pointer to the start of that range.
    /// inner_range is local to this block of memory, not to the container as a whole.
     pub fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8> {
-        Ok(<<P as MemoryPool>::Block>::map(
-            &mut *self.mem,
-            device,
-            inner_range,
-        )?)
+        <<P as MemoryPool>::Block>::map(&mut *self.mem, device, inner_range)
     }
 
     /// Remove any mappings present for this staging buffer.
-    pub unsafe fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
+    pub fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
         self.mem.unmap(device)
     }
diff --git a/stockton-skeleton/src/context.rs b/stockton-skeleton/src/context.rs
index bbd0164..d1997d3 100644
--- a/stockton-skeleton/src/context.rs
+++ b/stockton-skeleton/src/context.rs
@@ -285,12 +285,13 @@ impl RenderingContext {
     }
 
     /// Get the specified memory pool, lazily initialising it if it's not yet present
-    pub fn memory_pool<'a, P: MemoryPool>(&'a mut self) -> Result<&'a Arc<RwLock<P>>> {
+    pub fn memory_pool<P: MemoryPool>(&mut self) -> Result<&Arc<RwLock<P>>> {
         self.ensure_memory_pool::<P>()?;
         Ok(self.existing_memory_pool::<P>().unwrap())
     }
 
     /// Ensure the specified memory pool is initialised.
+    #[allow(clippy::map_entry)] // We can't follow the suggestion because of a borrowing issue
     pub fn ensure_memory_pool<P: MemoryPool>(&mut self) -> Result<()> {
         let tid = TypeId::of::<P>();
         if !self.memory_pools.contains_key(&tid) {
diff --git a/stockton-skeleton/src/mem.rs b/stockton-skeleton/src/mem.rs
index af0a42b..85bf295 100644
--- a/stockton-skeleton/src/mem.rs
+++ b/stockton-skeleton/src/mem.rs
@@ -92,15 +92,15 @@ mod rendy {
 
     /// So we can use rendy blocks as our blocks
     impl> Block for T {
         fn properties(&self) -> Properties {
-            >::properties(&self)
+            >::properties(self)
         }
 
         fn memory(&self) -> &MemoryT {
-            >::memory(&self)
+            >::memory(self)
         }
 
         fn range(&self) -> Range<u64> {
-            >::range(&self)
+            >::range(self)
         }
     }
@@ -359,7 +359,8 @@ mod rendy {
         }
 
         fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
-            Ok(self.0.unmap(device))
+            self.0.unmap(device);
+            Ok(())
         }
     }
 }
diff --git a/stockton-skeleton/src/texture/load.rs b/stockton-skeleton/src/texture/load.rs
index 6cb4f4d..c4d3b72 100644
--- a/stockton-skeleton/src/texture/load.rs
+++ b/stockton-skeleton/src/texture/load.rs
@@ -61,19 +61,6 @@ pub struct QueuedLoad {
     pub staging_bufs: ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
 }
 
-impl QueuedLoad {
-    /// Break down into a tuple
-    pub fn dissolve(
-        self,
-    ) -> (
-        (FenceT, CommandBufferT),
-        ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
-        TexturesBlock,
-    ) {
-        ((self.fence, self.buf), self.staging_bufs, self.block)
-    }
-}
-
 /// Create a SampledImage for the given LoadableImage, and load the image data into a StagingBuffer
 /// Note that this doesn't queue up transferring from the buffer to the image.
 pub unsafe fn load_image(
diff --git a/stockton-skeleton/src/texture/loader.rs b/stockton-skeleton/src/texture/loader.rs
index ea42c29..80d4a61 100644
--- a/stockton-skeleton/src/texture/loader.rs
+++ b/stockton-skeleton/src/texture/loader.rs
@@ -149,7 +149,11 @@ where
                 .context("Error checking fence status")?;
 
             if signalled {
-                let (assets, mut staging_bufs, block) = self.commands_queued.remove(i).dissolve();
+                let queued_load = self.commands_queued.remove(i);
+                let assets = (queued_load.fence, queued_load.buf);
+                let block = queued_load.block;
+                let mut staging_bufs = queued_load.staging_bufs;
+
                 debug!("Load finished for texture block {:?}", block.id);
 
                 // Lock staging memory pool
@@ -384,8 +388,8 @@ where
         let (staging_buffer, img) = load_image(
             &mut device,
-            &mut self.staging_mempool,
-            &mut self.tex_mempool,
+            &self.staging_mempool,
+            &self.tex_mempool,
             self.optimal_buffer_copy_pitch_alignment,
             img_data,
             &self.config,
         )?;
@@ -613,8 +617,10 @@ where
 
             if signalled {
                 // Destroy finished ones
-                let (assets, mut staging_bufs, block) =
-                    self.commands_queued.remove(i).dissolve();
+                let queued_load = self.commands_queued.remove(i);
+                let assets = (queued_load.fence, queued_load.buf);
+                let block = queued_load.block;
+                let mut staging_bufs = queued_load.staging_bufs;
 
                 device.destroy_fence(assets.0);
                 // Command buffer will be freed when we reset the command pool
-- 
cgit v1.2.3