| author | tcmal <me@aria.rip> | 2024-08-25 17:44:23 +0100 |
|---|---|---|
| committer | tcmal <me@aria.rip> | 2024-08-25 17:44:23 +0100 |
| commit | 399ae8ba45f40a54ec68026f77ca3c7c709ab62f (patch) | |
| tree | 0ca721f8baf1cdf612c27822cd106343cad26155 /stockton-render/src/draw/texture/loader.rs | |
| parent | d7d0f0277c437004ed476393542da39c84c5cf9f (diff) | |
refactor(render): move functions from load into loader
Diffstat (limited to 'stockton-render/src/draw/texture/loader.rs')
-rw-r--r-- | stockton-render/src/draw/texture/loader.rs | 356 |
1 file changed, 334 insertions(+), 22 deletions(-)
```diff
diff --git a/stockton-render/src/draw/texture/loader.rs b/stockton-render/src/draw/texture/loader.rs
index a23d633..2198fe4 100644
--- a/stockton-render/src/draw/texture/loader.rs
+++ b/stockton-render/src/draw/texture/loader.rs
@@ -2,14 +2,17 @@
 use super::{
     block::{LoadedImage, TexturesBlock},
-    load::{QueuedLoad, TextureLoadError},
+    load::{load_image, QueuedLoad, TextureLoadError, LAYERS, RESOURCES},
+    repo::BLOCK_SIZE,
     resolver::TextureResolver,
-    LoadableImage,
+    LoadableImage, PIXEL_SIZE,
 };
 
 use crate::{draw::utils::find_memory_type_id, error::LockPoisoned, types::*};
 
 use std::{
+    array::IntoIter,
     collections::VecDeque,
+    iter::{empty, once},
     marker::PhantomData,
     mem::{drop, ManuallyDrop},
     sync::{
@@ -23,12 +26,17 @@ use std::{
 use anyhow::{Context, Result};
 use arrayvec::ArrayVec;
 use hal::{
-    format::Format,
-    memory::{Properties as MemProps, SparseFlags},
+    command::{BufferImageCopy, CommandBufferFlags},
+    format::{Aspects, Format},
+    image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange},
+    memory::{Barrier, Dependencies, Properties as MemProps, SparseFlags},
+    pso::{Descriptor, DescriptorSetWrite, ImageDescriptorType, PipelineStage, ShaderStageFlags},
     queue::family::QueueFamilyId,
     MemoryTypeId,
 };
+use image::{Rgba, RgbaImage};
 use log::*;
+use rendy_descriptor::{DescriptorRanges, DescriptorSetLayoutBinding, DescriptorType};
 use rendy_memory::DynamicConfig;
 use stockton_levels::prelude::HasTextures;
 use thiserror::Error;
@@ -43,54 +51,54 @@ pub type BlockRef = usize;
 
 /// This is expected to load the textures, then send the loaded blocks back
 pub struct TextureLoader<T, R, I> {
     /// Blocks for which commands have been queued and are done loading once the fence is triggered.
-    pub(crate) commands_queued: ArrayVec<[QueuedLoad<DynamicBlock>; NUM_SIMULTANEOUS_CMDS]>,
+    commands_queued: ArrayVec<[QueuedLoad<DynamicBlock>; NUM_SIMULTANEOUS_CMDS]>,
 
     /// The command buffers used and a fence to go with them
-    pub(crate) buffers: VecDeque<(FenceT, CommandBufferT)>,
+    buffers: VecDeque<(FenceT, CommandBufferT)>,
 
     /// The command pool buffers were allocated from
-    pub(crate) pool: ManuallyDrop<CommandPoolT>,
+    pool: ManuallyDrop<CommandPoolT>,
 
     /// The GPU we're submitting to
-    pub(crate) device: Arc<RwLock<DeviceT>>,
+    device: Arc<RwLock<DeviceT>>,
 
     /// The command queue being used
-    pub(crate) queue: Arc<RwLock<QueueT>>,
+    queue: Arc<RwLock<QueueT>>,
 
     /// The memory allocator being used for textures
-    pub(crate) tex_allocator: ManuallyDrop<DynamicAllocator>,
+    tex_allocator: ManuallyDrop<DynamicAllocator>,
 
     /// The memory allocator for staging memory
-    pub(crate) staging_allocator: ManuallyDrop<DynamicAllocator>,
+    staging_allocator: ManuallyDrop<DynamicAllocator>,
 
     /// Allocator for descriptor sets
-    pub(crate) descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
+    descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
 
-    pub(crate) ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
+    ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
 
     /// Type ID for staging memory
-    pub(crate) staging_memory_type: MemoryTypeId,
+    staging_memory_type: MemoryTypeId,
 
     /// From adapter, used for determining alignment
-    pub(crate) optimal_buffer_copy_pitch_alignment: hal::buffer::Offset,
+    optimal_buffer_copy_pitch_alignment: hal::buffer::Offset,
 
     /// The textures lump to get info from
-    pub(crate) textures: Arc<RwLock<T>>,
+    textures: Arc<RwLock<T>>,
 
     /// The resolver which gets image data for a given texture.
-    pub(crate) resolver: R,
+    resolver: R,
 
     /// The channel requests come in.
     /// Requests should reference a texture **block**, for example textures 8..16 is block 1.
-    pub(crate) request_channel: Receiver<LoaderRequest>,
+    request_channel: Receiver<LoaderRequest>,
 
     /// The channel blocks are returned to.
-    pub(crate) return_channel: Sender<TexturesBlock<DynamicBlock>>,
+    return_channel: Sender<TexturesBlock<DynamicBlock>>,
 
     /// A filler image for descriptors that aren't needed but still need to be written to
-    pub(crate) blank_image: ManuallyDrop<LoadedImage<DynamicBlock>>,
+    blank_image: ManuallyDrop<LoadedImage<DynamicBlock>>,
 
-    pub(crate) _li: PhantomData<I>,
+    _li: PhantomData<I>,
 }
 
 #[derive(Error, Debug)]
@@ -100,7 +108,7 @@ pub enum TextureLoaderError {
 }
 
 impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R, I> {
-    pub fn loop_forever(mut self) -> Result<TextureLoaderRemains> {
+    pub fn loop_until_exit(mut self) -> Result<TextureLoaderRemains> {
         debug!("TextureLoader starting main loop");
         let mut res = Ok(false);
         while res.is_ok() {
@@ -331,6 +339,310 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
         })
     }
 
+    unsafe fn attempt_queue_load(&mut self, block_ref: usize) -> Result<QueuedLoad<DynamicBlock>> {
+        let mut device = self
+            .device
+            .write()
+            .map_err(|_| LockPoisoned::Device)
+            .context("Error getting device lock")?;
+
+        let textures = self
+            .textures
+            .read()
+            .map_err(|_| LockPoisoned::Map)
+            .context("Error getting map lock")?;
+
+        // Get assets to use
+        let (mut fence, mut buf) = self
+            .buffers
+            .pop_front()
+            .ok_or(TextureLoadError::NoResources)
+            .context("Error getting resources to use")?;
+
+        // Create descriptor set
+        let mut descriptor_set = {
+            let mut v: ArrayVec<[RDescriptorSet; 1]> = ArrayVec::new();
+            self.descriptor_allocator
+                .allocate(
+                    &device,
+                    &*self.ds_layout.read().unwrap(),
+                    DescriptorRanges::from_bindings(&[
+                        DescriptorSetLayoutBinding {
+                            binding: 0,
+                            ty: DescriptorType::Image {
+                                ty: ImageDescriptorType::Sampled {
+                                    with_sampler: false,
+                                },
+                            },
+                            count: BLOCK_SIZE,
+                            stage_flags: ShaderStageFlags::FRAGMENT,
+                            immutable_samplers: false,
+                        },
+                        DescriptorSetLayoutBinding {
+                            binding: 1,
+                            ty: DescriptorType::Sampler,
+                            count: BLOCK_SIZE,
+                            stage_flags: ShaderStageFlags::FRAGMENT,
+                            immutable_samplers: false,
+                        },
+                    ]),
+                    1,
+                    &mut v,
+                )
+                .context("Error creating descriptor set")?;
+
+            v.pop().unwrap()
+        };
+
+        // Get a command buffer
+        buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
+
+        let mut imgs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new();
+        let mut staging_bufs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new();
+
+        // For each texture in block
+        for tex_idx in (block_ref * BLOCK_SIZE)..(block_ref + 1) * BLOCK_SIZE {
+            // Get texture and Resolve image
+            let tex = textures.get_texture(tex_idx as u32);
+            if tex.is_none() {
+                // Write a blank descriptor
+                device.write_descriptor_set(DescriptorSetWrite {
+                    set: descriptor_set.raw_mut(),
+                    binding: 0,
+                    array_offset: tex_idx % BLOCK_SIZE,
+                    descriptors: once(Descriptor::Image(
+                        &*self.blank_image.img_view,
+                        Layout::ShaderReadOnlyOptimal,
+                    )),
+                });
+                device.write_descriptor_set(DescriptorSetWrite {
+                    set: descriptor_set.raw_mut(),
+                    binding: 1,
+                    array_offset: tex_idx % BLOCK_SIZE,
+                    descriptors: once(Descriptor::Sampler(&*self.blank_image.sampler)),
+                });
+
+                continue;
+            }
+
+            let tex = tex.unwrap();
+
+            let img_data = self
+                .resolver
+                .resolve(tex)
+                .ok_or(TextureLoadError::ResolveFailed(tex_idx))?;
+            let array_offset = tex_idx % BLOCK_SIZE;
+
+            let (staging_buffer, img) = load_image(
+                &mut device,
+                &mut self.staging_allocator,
+                &mut self.tex_allocator,
+                self.staging_memory_type,
+                self.optimal_buffer_copy_pitch_alignment,
+                img_data,
+            )?;
+
+            // Write to descriptor set
+            {
+                device.write_descriptor_set(DescriptorSetWrite {
+                    set: descriptor_set.raw_mut(),
+                    binding: 0,
+                    array_offset,
+                    descriptors: once(Descriptor::Image(
+                        &*img.img_view,
+                        Layout::ShaderReadOnlyOptimal,
+                    )),
+                });
+                device.write_descriptor_set(DescriptorSetWrite {
+                    set: descriptor_set.raw_mut(),
+                    binding: 1,
+                    array_offset,
+                    descriptors: once(Descriptor::Sampler(&*img.sampler)),
+                });
+            }
+
+            imgs.push(img);
+
+            staging_bufs.push(staging_buffer);
+        }
+
+        // Add start pipeline barrier
+        buf.pipeline_barrier(
+            PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
+            Dependencies::empty(),
+            imgs.iter().map(|li| Barrier::Image {
+                states: (Access::empty(), Layout::Undefined)
+                    ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
+                target: &*li.img,
+                families: None,
+                range: SubresourceRange {
+                    aspects: Aspects::COLOR,
+                    level_start: 0,
+                    level_count: None,
+                    layer_start: 0,
+                    layer_count: None,
+                },
+            }),
+        );
+
+        // Record copy commands
+        for (li, sb) in imgs.iter().zip(staging_bufs.iter()) {
+            buf.copy_buffer_to_image(
+                &*sb.buf,
+                &*li.img,
+                Layout::TransferDstOptimal,
+                once(BufferImageCopy {
+                    buffer_offset: 0,
+                    buffer_width: (li.row_size / super::PIXEL_SIZE) as u32,
+                    buffer_height: li.height,
+                    image_layers: SubresourceLayers {
+                        aspects: Aspects::COLOR,
+                        level: 0,
+                        layers: 0..1,
+                    },
+                    image_offset: Offset { x: 0, y: 0, z: 0 },
+                    image_extent: gfx_hal::image::Extent {
+                        width: li.width,
+                        height: li.height,
+                        depth: 1,
+                    },
+                }),
+            );
+        }
+        buf.pipeline_barrier(
+            PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE,
+            Dependencies::empty(),
+            imgs.iter().map(|li| Barrier::Image {
+                states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
+                    ..(Access::empty(), Layout::ShaderReadOnlyOptimal),
+                target: &*li.img,
+                families: None,
+                range: RESOURCES,
+            }),
+        );
+
+        buf.finish();
+
+        // Submit command buffer
+        {
+            let mut queue = self.queue.write().map_err(|_| LockPoisoned::Queue)?;
+
+            queue.submit(IntoIter::new([&buf]), empty(), empty(), Some(&mut fence));
+        }
+
+        Ok(QueuedLoad {
+            staging_bufs,
+            fence,
+            buf,
+            block: TexturesBlock {
+                id: block_ref,
+                imgs,
+                descriptor_set: ManuallyDrop::new(descriptor_set),
+            },
+        })
+    }
+
+    unsafe fn get_blank_image(
+        device: &mut DeviceT,
+        buf: &mut CommandBufferT,
+        queue_lock: &Arc<RwLock<QueueT>>,
+        staging_allocator: &mut DynamicAllocator,
+        tex_allocator: &mut DynamicAllocator,
+        staging_memory_type: MemoryTypeId,
+        obcpa: u64,
+    ) -> Result<LoadedImage<DynamicBlock>> {
+        let img_data = RgbaImage::from_pixel(1, 1, Rgba([0, 0, 0, 0]));
+
+        let height = img_data.height();
+        let width = img_data.width();
+        let row_alignment_mask = obcpa as u32 - 1;
+        let initial_row_size = PIXEL_SIZE * img_data.width() as usize;
+        let row_size =
+            ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize;
+
+        let (staging_buffer, img) = load_image(
+            device,
+            staging_allocator,
+            tex_allocator,
+            staging_memory_type,
+            obcpa,
+            img_data,
+        )?;
+
+        buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
+
+        buf.pipeline_barrier(
+            PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
+            Dependencies::empty(),
+            once(Barrier::Image {
+                states: (Access::empty(), Layout::Undefined)
+                    ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
+                target: &*img.img,
+                families: None,
+                range: SubresourceRange {
+                    aspects: Aspects::COLOR,
+                    level_start: 0,
+                    level_count: None,
+                    layer_start: 0,
+                    layer_count: None,
+                },
+            }),
+        );
+        buf.copy_buffer_to_image(
+            &*staging_buffer.buf,
+            &*img.img,
+            Layout::TransferDstOptimal,
+            once(BufferImageCopy {
+                buffer_offset: 0,
+                buffer_width: (row_size / super::PIXEL_SIZE) as u32,
+                buffer_height: height,
+                image_layers: LAYERS,
+                image_offset: Offset { x: 0, y: 0, z: 0 },
+                image_extent: Extent {
+                    width: width,
+                    height: height,
+                    depth: 1,
+                },
+            }),
+        );
+
+        buf.pipeline_barrier(
+            PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE,
+            Dependencies::empty(),
+            once(Barrier::Image {
+                states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
+                    ..(Access::empty(), Layout::ShaderReadOnlyOptimal),
+                target: &*img.img,
+                families: None,
+                range: RESOURCES,
+            }),
+        );
+        buf.finish();
+
+        let mut fence = device.create_fence(false).context("Error creating fence")?;
+
+        {
+            let mut queue = queue_lock.write().map_err(|_| LockPoisoned::Queue)?;
+
+            queue.submit(
+                IntoIter::new([buf as &CommandBufferT]),
+                empty(),
+                empty(),
+                Some(&mut fence),
+            );
+        }
+
+        device
+            .wait_for_fence(&fence, std::u64::MAX)
+            .context("Error waiting for copy")?;
+
+        device.destroy_fence(fence);
+
+        staging_buffer.deactivate(device, staging_allocator);
+
+        Ok(img)
+    }
+
     /// Safely destroy all the vulkan stuff in this instance
     /// Note that this returns the memory allocators, from which should be freed any TextureBlocks
     /// All in-progress things are sent to return_channel.
```
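
A note on the indexing in `attempt_queue_load`: requests name a texture *block*, the loop derives global texture indices from the block reference, and `tex_idx % BLOCK_SIZE` gives the slot written in the block's descriptor arrays. Below is a minimal standalone sketch of that arithmetic; `BLOCK_SIZE = 8` is an assumption inferred from the doc comment "textures 8..16 is block 1" (the real constant lives in `repo.rs` and is not shown in this diff).

```rust
// Sketch only: BLOCK_SIZE = 8 is assumed here, inferred from the doc
// comment "textures 8..16 is block 1"; the real constant is in repo.rs.
const BLOCK_SIZE: usize = 8;

/// Which block a global texture index belongs to.
fn block_for_texture(tex_idx: usize) -> usize {
    tex_idx / BLOCK_SIZE
}

/// The global texture indices a block covers; this is the range
/// attempt_queue_load iterates over.
fn textures_in_block(block_ref: usize) -> std::ops::Range<usize> {
    (block_ref * BLOCK_SIZE)..(block_ref + 1) * BLOCK_SIZE
}

fn main() {
    assert_eq!(textures_in_block(1), 8..16);
    assert_eq!(block_for_texture(12), 1);
    // Slot within the block's descriptor arrays (array_offset in the diff).
    assert_eq!(12 % BLOCK_SIZE, 4);
}
```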
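Similarly, `get_blank_image` pads each staging-buffer row up to `optimal_buffer_copy_pitch_alignment` with a mask trick before recording the buffer-to-image copy. A standalone sketch of that round-up follows; the mask trick requires a power-of-two alignment (which the code above also relies on), and the value 256 in the usage is illustrative, not taken from the diff.

```rust
/// Round a row size up to the next multiple of `alignment`.
/// `alignment` must be a power of two, as with
/// optimalBufferCopyRowPitchAlignment on Vulkan-style APIs.
fn aligned_row_size(initial_row_size: u32, alignment: u32) -> u32 {
    let mask = alignment - 1;
    (initial_row_size + mask) & !mask
}

fn main() {
    // A 1x1 RGBA image has a 4-byte row; with a 256-byte pitch
    // alignment the row in the staging buffer is padded to 256 bytes.
    assert_eq!(aligned_row_size(4, 256), 256);
    // Already-aligned sizes pass through unchanged.
    assert_eq!(aligned_row_size(512, 256), 512);
}
```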