author     tcmal <me@aria.rip>  2024-08-25 17:44:24 +0100
committer  tcmal <me@aria.rip>  2024-08-25 17:44:24 +0100
commit     5360fe6a337145b2640ec1e5727c97fd9bbeacd9 (patch)
tree       264f4cda45b001fb5049b9120d4d7b3a63c95e6f
parent     0ddc1e39dc24cff636defbbbab974967bb5301b8 (diff)
refactor(all): small fixes
-rw-r--r--  stockton-skeleton/src/buffers/image.rs       4
-rw-r--r--  stockton-skeleton/src/buffers/staging.rs     8
-rw-r--r--  stockton-skeleton/src/context.rs             3
-rw-r--r--  stockton-skeleton/src/mem.rs                 9
-rw-r--r--  stockton-skeleton/src/texture/load.rs       13
-rw-r--r--  stockton-skeleton/src/texture/loader.rs     16
6 files changed, 22 insertions, 31 deletions
diff --git a/stockton-skeleton/src/buffers/image.rs b/stockton-skeleton/src/buffers/image.rs
index 34a0a37..4278585 100644
--- a/stockton-skeleton/src/buffers/image.rs
+++ b/stockton-skeleton/src/buffers/image.rs
@@ -116,7 +116,7 @@ impl<P: MemoryPool> BoundImageView<P> {
 
         // Allocate memory
         let (mem, _) = pool
-            .alloc(&device, requirements.size, requirements.alignment)
+            .alloc(device, requirements.size, requirements.alignment)
            .context("Error allocating memory")?;
 
         // Bind memory
@@ -169,7 +169,7 @@ impl<P: MemoryPool> BoundImageView<P> {
         unsafe {
             device.destroy_image_view(read(&*self.img_view));
             device.destroy_image(read(&*self.img));
-            pool.free(&device, read(&*self.mem));
+            pool.free(device, read(&*self.mem));
         }
     }
 
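
Both image.rs changes fix the same clippy::needless_borrow pattern: `device` is already a `&DeviceT`, so `&device` creates a double reference that only compiles thanks to auto-deref. A minimal sketch of the pattern, with hypothetical stand-in types rather than the crate's real MemoryPool API:

```rust
struct Device;
struct Pool;

impl Pool {
    // Stand-in for MemoryPool::alloc, which takes the device by reference.
    fn alloc(&self, _device: &Device, _size: u64, _align: u64) {}
}

fn bind(pool: &Pool, device: &Device) {
    // Before: `pool.alloc(&device, ...)` passed a `&&Device`. Auto-deref
    // makes it compile, but clippy::needless_borrow flags the extra `&`.
    pool.alloc(device, 64, 16);
}
```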
diff --git a/stockton-skeleton/src/buffers/staging.rs b/stockton-skeleton/src/buffers/staging.rs
index 5c80f51..44d0c2d 100644
--- a/stockton-skeleton/src/buffers/staging.rs
+++ b/stockton-skeleton/src/buffers/staging.rs
@@ -61,15 +61,11 @@ where
 
     /// Map the given range to CPU-visible memory, returning a pointer to the start of that range.
     /// inner_range is local to this block of memory, not to the container as a whole.
     pub fn map(&mut self, device: &mut DeviceT, inner_range: Range<u64>) -> Result<*mut u8> {
-        Ok(<<P as MemoryPool>::Block>::map(
-            &mut *self.mem,
-            device,
-            inner_range,
-        )?)
+        <<P as MemoryPool>::Block>::map(&mut *self.mem, device, inner_range)
     }
 
     /// Remove any mappings present for this staging buffer.
-    pub unsafe fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
+    pub fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
         self.mem.unmap(device)
     }
 
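
Two cleanups in staging.rs: `map` no longer unwraps a Result with `?` just to re-wrap it in `Ok` (the clippy::needless_question_mark pattern), and `unmap` drops its `unsafe` qualifier since it only forwards to a safe method. A sketch of the first fix, using std::io::Error as a stand-in error type:

```rust
use std::io::{Error, ErrorKind};

// Stand-in for <P::Block>::map, which already returns the right Result type.
fn inner_map() -> Result<*mut u8, Error> {
    Err(Error::new(ErrorKind::Other, "unmapped"))
}

// Before: `Ok(inner_map()?)` unwrapped the Result only to re-wrap it,
// which clippy::needless_question_mark flags.
fn map_before() -> Result<*mut u8, Error> {
    Ok(inner_map()?)
}

// After: the inner Result is already the right type, so return it directly.
fn map_after() -> Result<*mut u8, Error> {
    inner_map()
}
```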
diff --git a/stockton-skeleton/src/context.rs b/stockton-skeleton/src/context.rs
index bbd0164..d1997d3 100644
--- a/stockton-skeleton/src/context.rs
+++ b/stockton-skeleton/src/context.rs
@@ -285,12 +285,13 @@ impl RenderingContext {
     }
 
     /// Get the specified memory pool, lazily initialising it if it's not yet present
-    pub fn memory_pool<'a, P: MemoryPool>(&'a mut self) -> Result<&'a Arc<RwLock<P>>> {
+    pub fn memory_pool<P: MemoryPool>(&mut self) -> Result<&Arc<RwLock<P>>> {
         self.ensure_memory_pool::<P>()?;
         Ok(self.existing_memory_pool::<P>().unwrap())
     }
 
     /// Ensure the specified memory pool is initialised.
+    #[allow(clippy::map_entry)] // We can't follow the suggestion because of a borrowing issue
     pub fn ensure_memory_pool<P: MemoryPool>(&mut self) -> Result<()> {
         let tid = TypeId::of::<P>();
         if !self.memory_pools.contains_key(&tid) {
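
The new `#[allow(clippy::map_entry)]` is there because clippy's suggested `entry(..).or_insert_with(..)` would keep `self.memory_pools` mutably borrowed while the closure presumably needs `&mut self` again to build the pool. A minimal sketch of that borrow conflict, with a hypothetical Registry in place of RenderingContext:

```rust
use std::collections::HashMap;

struct Registry {
    pools: HashMap<u32, String>,
    counter: u32,
}

impl Registry {
    #[allow(clippy::map_entry)]
    fn ensure(&mut self, key: u32) {
        if !self.pools.contains_key(&key) {
            // `self.pools.entry(key).or_insert_with(|| self.make_pool())`
            // would not compile: `entry` mutably borrows self.pools while
            // the closure needs `&mut self` again.
            let pool = self.make_pool();
            self.pools.insert(key, pool);
        }
    }

    fn make_pool(&mut self) -> String {
        self.counter += 1;
        format!("pool-{}", self.counter)
    }
}
```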
diff --git a/stockton-skeleton/src/mem.rs b/stockton-skeleton/src/mem.rs
index af0a42b..85bf295 100644
--- a/stockton-skeleton/src/mem.rs
+++ b/stockton-skeleton/src/mem.rs
@@ -92,15 +92,15 @@ mod rendy {
     /// So we can use rendy blocks as our blocks
     impl<T: RBlock<back::Backend>> Block for T {
         fn properties(&self) -> Properties {
-            <T as RBlock<back::Backend>>::properties(&self)
+            <T as RBlock<back::Backend>>::properties(self)
         }
 
         fn memory(&self) -> &MemoryT {
-            <T as RBlock<back::Backend>>::memory(&self)
+            <T as RBlock<back::Backend>>::memory(self)
         }
 
         fn range(&self) -> Range<u64> {
-            <T as RBlock<back::Backend>>::range(&self)
+            <T as RBlock<back::Backend>>::range(self)
         }
     }
 
@@ -359,7 +359,8 @@
         }
 
         fn unmap(&mut self, device: &mut DeviceT) -> Result<()> {
-            Ok(self.0.unmap(device))
+            self.0.unmap(device);
+            Ok(())
         }
     }
 }
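
The first mem.rs hunk is the needless-borrow fix again, this time on `&self` receivers being re-borrowed. The second stops passing a unit value into `Ok(..)` (the clippy::unit_arg pattern); the diff itself confirms `self.0.unmap(device)` returns `()`. A sketch with stand-in types:

```rust
struct Inner;

impl Inner {
    fn unmap(&mut self) {} // returns (), like the wrapped unmap above
}

struct Wrapper(Inner);

impl Wrapper {
    fn unmap(&mut self) -> Result<(), std::io::Error> {
        // Before: `Ok(self.0.unmap())` passed the unit value straight into
        // Ok(), which clippy::unit_arg flags; splitting it reads clearly.
        self.0.unmap();
        Ok(())
    }
}
```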
diff --git a/stockton-skeleton/src/texture/load.rs b/stockton-skeleton/src/texture/load.rs
index 6cb4f4d..c4d3b72 100644
--- a/stockton-skeleton/src/texture/load.rs
+++ b/stockton-skeleton/src/texture/load.rs
@@ -61,19 +61,6 @@ pub struct QueuedLoad<TP: MemoryPool, SP: MemoryPool> {
     pub staging_bufs: ArrayVec<[StagingBuffer<SP>; BLOCK_SIZE]>,
 }
 
-impl<TP: MemoryPool, SP: MemoryPool> QueuedLoad<TP, SP> {
-    /// Break down into a tuple
-    pub fn dissolve(
-        self,
-    ) -> (
-        (FenceT, CommandBufferT),
-        ArrayVec<[StagingBuffer<SP>; BLOCK_SIZE]>,
-        TexturesBlock<TP>,
-    ) {
-        ((self.fence, self.buf), self.staging_bufs, self.block)
-    }
-}
-
 /// Create a SampledImage for the given LoadableImage, and load the image data into a StagingBuffer
 /// Note that this doesn't queue up transferring from the buffer to the image.
 pub unsafe fn load_image<I, R, SP, TP>(
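
With `dissolve` gone, callers move the fields of QueuedLoad out by name instead of through a positional tuple (see the loader.rs diff below). A simplified before/after sketch, with concrete stand-in field types in place of the real generics:

```rust
struct QueuedLoad {
    fence: u32,
    buf: u32,
    block: u32,
}

// Before: a helper packed everything into a positional tuple,
// hiding the field names at the call site.
fn dissolve(q: QueuedLoad) -> ((u32, u32), u32) {
    ((q.fence, q.buf), q.block)
}

// After: callers move each field out by name, as loader.rs now does.
fn take(q: QueuedLoad) {
    let assets = (q.fence, q.buf);
    let block = q.block;
    let _ = (assets, block);
}
```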
diff --git a/stockton-skeleton/src/texture/loader.rs b/stockton-skeleton/src/texture/loader.rs
index ea42c29..80d4a61 100644
--- a/stockton-skeleton/src/texture/loader.rs
+++ b/stockton-skeleton/src/texture/loader.rs
@@ -149,7 +149,11 @@ where
                 .context("Error checking fence status")?;
 
             if signalled {
-                let (assets, mut staging_bufs, block) = self.commands_queued.remove(i).dissolve();
+                let queued_load = self.commands_queued.remove(i);
+                let assets = (queued_load.fence, queued_load.buf);
+                let block = queued_load.block;
+                let mut staging_bufs = queued_load.staging_bufs;
+
                 debug!("Load finished for texture block {:?}", block.id);
 
                 // Lock staging memory pool
@@ -384,8 +388,8 @@ where
 
             let (staging_buffer, img) = load_image(
                 &mut device,
-                &mut self.staging_mempool,
-                &mut self.tex_mempool,
+                &self.staging_mempool,
+                &self.tex_mempool,
                 self.optimal_buffer_copy_pitch_alignment,
                 img_data,
                 &self.config,
@@ -613,8 +617,10 @@ where
 
                 if signalled {
                     // Destroy finished ones
-                    let (assets, mut staging_bufs, block) =
-                        self.commands_queued.remove(i).dissolve();
+                    let queued_load = self.commands_queued.remove(i);
+                    let assets = (queued_load.fence, queued_load.buf);
+                    let block = queued_load.block;
+                    let mut staging_bufs = queued_load.staging_bufs;
 
                     device.destroy_fence(assets.0);
                     // Command buffer will be freed when we reset the command pool
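
Since the same four-line extraction now appears twice in loader.rs, it could also be written as a single struct destructure. A sketch with the same simplified stand-in types as above:

```rust
struct QueuedLoad {
    fence: u32,
    buf: u32,
    block: u32,
    staging_bufs: Vec<u8>,
}

fn drain(q: QueuedLoad) {
    // One pattern moves every field out by name, replacing the four `let`s.
    let QueuedLoad { fence, buf, block, staging_bufs } = q;
    let assets = (fence, buf);
    let _ = (assets, block, staging_bufs);
}
```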