author     tcmal <me@aria.rip>    2024-08-25 17:44:23 +0100
committer  tcmal <me@aria.rip>    2024-08-25 17:44:23 +0100
commit     0353181306702c40ad0fe482b5c2b159b46794a4 (patch)
tree       33acc6a9e8ea4705884cf93b78cf869008f71832 /stockton-skeleton
parent     664f0b0777ba96298b29f0c753d52a81cbb233f1 (diff)
refactor(all): rename some crates
Diffstat (limited to 'stockton-skeleton')
27 files changed, 3331 insertions, 0 deletions
diff --git a/stockton-skeleton/Cargo.toml b/stockton-skeleton/Cargo.toml
new file mode 100644
index 0000000..8e3df4b

[package]
name = "stockton-skeleton"
version = "0.1.0"
authors = ["Oscar <oscar.shrimpton.personal@gmail.com>"]
edition = "2018"

[dependencies]
stockton-input = { path = "../stockton-input" }
stockton-levels = { path = "../stockton-levels" }
stockton-types = { path = "../stockton-types" }
winit = "^0.21"
gfx-hal = "^0.8.0"
arrayvec = "0.4.10"
nalgebra-glm = "^0.6"
shaderc = "^0.7"
log = "0.4.0"
image = "0.23.11"
legion = { version = "^0.3" }
rendy-memory = { path = "../rendy-memory" }
rendy-descriptor = { path = "../rendy-descriptor" }
anyhow = "1.0.40"
thiserror = "1.0.25"
derive_builder = "0.10.2"

[features]
default = ["vulkan"]
vulkan = ["gfx-backend-vulkan"]

[dependencies.gfx-backend-vulkan]
version = "^0.8.0"
optional = true

diff --git a/stockton-skeleton/src/buffers/dedicated_image.rs b/stockton-skeleton/src/buffers/dedicated_image.rs
new file mode 100644
index 0000000..bf49a38

//! A dedicated image. Used for depth buffers.

use crate::texture::PIXEL_SIZE;
use crate::types::*;

use std::mem::ManuallyDrop;

use anyhow::{Context, Result};
use hal::{
    format::{Format, Swizzle},
    image::{SubresourceRange, Usage, Usage as ImgUsage, ViewKind},
    memory,
    memory::Properties,
    MemoryTypeId,
};
use thiserror::Error;

/// Holds an image that's loaded into GPU memory dedicated only to that image,
/// bypassing the memory allocator.
pub struct DedicatedLoadedImage {
    /// The GPU Image handle
    image: ManuallyDrop<ImageT>,

    /// The full view of the image
    pub image_view: ManuallyDrop<ImageViewT>,

    /// The memory backing the image
    memory: ManuallyDrop<MemoryT>,
}

#[derive(Debug, Error)]
pub enum ImageLoadError {
    #[error("No suitable memory type for image memory")]
    NoMemoryTypes,
}

impl DedicatedLoadedImage {
    pub fn new(
        device: &mut DeviceT,
        adapter: &Adapter,
        format: Format,
        usage: Usage,
        resources: SubresourceRange,
        width: usize,
        height: usize,
    ) -> Result<DedicatedLoadedImage> {
        let (memory, image_ref) = {
            // Round up the size to align properly
            let initial_row_size = PIXEL_SIZE * width;
            let limits = adapter.physical_device.properties().limits;
            let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1;

            let row_size =
                ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize;
            debug_assert!(row_size as usize >= initial_row_size);

            // Make the image
            let mut image_ref = unsafe {
                use hal::image::{Kind, Tiling, ViewCapabilities};

                device.create_image(
                    Kind::D2(width as u32, height as u32, 1, 1),
                    1,
                    format,
                    Tiling::Optimal,
                    usage,
                    memory::SparseFlags::empty(),
                    ViewCapabilities::empty(),
                )
            }
            .context("Error creating image")?;

            // Allocate memory
            let memory = unsafe {
                let requirements = device.get_image_requirements(&image_ref);

                let memory_type_id = adapter
                    .physical_device
                    .memory_properties()
                    .memory_types
                    .iter()
                    .enumerate()
                    .find(|&(id, memory_type)| {
                        requirements.type_mask & (1 << id) != 0
                            && memory_type.properties.contains(Properties::DEVICE_LOCAL)
                    })
                    .map(|(id, _)| MemoryTypeId(id))
                    .ok_or(ImageLoadError::NoMemoryTypes)?;

                let memory = device
                    .allocate_memory(memory_type_id, requirements.size)
                    .context("Error allocating memory for image")?;

                device
                    .bind_image_memory(&memory, 0, &mut image_ref)
                    .context("Error binding memory to image")?;

                memory
            };

            (memory, image_ref)
        };

        // Create ImageView and sampler
        let image_view = unsafe {
            device.create_image_view(
                &image_ref,
                ViewKind::D2,
                format,
                Swizzle::NO,
                ImgUsage::DEPTH_STENCIL_ATTACHMENT,
                resources,
            )
        }
        .context("Error creating image view")?;

        Ok(DedicatedLoadedImage {
            image: ManuallyDrop::new(image_ref),
            image_view: ManuallyDrop::new(image_view),
            memory: ManuallyDrop::new(memory),
        })
    }

    /// Properly frees/destroys all the objects in this struct.
    /// Dropping without calling this first is a bad idea.
    pub fn deactivate(self, device: &mut DeviceT) {
        unsafe {
            use core::ptr::read;

            device.destroy_image_view(ManuallyDrop::into_inner(read(&self.image_view)));
            device.destroy_image(ManuallyDrop::into_inner(read(&self.image)));
            device.free_memory(ManuallyDrop::into_inner(read(&self.memory)));
        }
    }
}

diff --git a/stockton-skeleton/src/buffers/draw_buffers.rs b/stockton-skeleton/src/buffers/draw_buffers.rs
new file mode 100644
index 0000000..5baec92

//! A vertex and index buffer set for drawing

use super::StagedBuffer;
use crate::types::*;

use anyhow::{Context, Result};
use hal::buffer::Usage;
use std::mem::ManuallyDrop;

/// Initial size of vertex buffer. TODO: Way of overriding this
pub const INITIAL_VERT_SIZE: u64 = 3 * 3000;

/// Initial size of index buffer. TODO: Way of overriding this
pub const INITIAL_INDEX_SIZE: u64 = 3000;

/// The buffers used for drawing, i.e. the index and vertex buffers
pub struct DrawBuffers<'a, T: Sized> {
    pub vertex_buffer: ManuallyDrop<StagedBuffer<'a, T>>,
    pub index_buffer: ManuallyDrop<StagedBuffer<'a, (u16, u16, u16)>>,
}

impl<'a, T> DrawBuffers<'a, T> {
    pub fn new(device: &mut DeviceT, adapter: &Adapter) -> Result<DrawBuffers<'a, T>> {
        let vert = StagedBuffer::new(device, adapter, Usage::VERTEX, INITIAL_VERT_SIZE)
            .context("Error creating vertex buffer")?;
        let index = StagedBuffer::new(device, adapter, Usage::INDEX, INITIAL_INDEX_SIZE)
            .context("Error creating index buffer")?;

        Ok(DrawBuffers {
            vertex_buffer: ManuallyDrop::new(vert),
            index_buffer: ManuallyDrop::new(index),
        })
    }

    pub fn deactivate(self, device: &mut DeviceT) {
        unsafe {
            use core::ptr::read;

            ManuallyDrop::into_inner(read(&self.vertex_buffer)).deactivate(device);
            ManuallyDrop::into_inner(read(&self.index_buffer)).deactivate(device);
        }
    }
}

diff --git a/stockton-skeleton/src/buffers/mod.rs b/stockton-skeleton/src/buffers/mod.rs
new file mode 100644
index 0000000..74c5aab

//! All sorts of buffers

use std::ops::IndexMut;

use crate::{error::EnvironmentError, types::*};

use anyhow::{Context, Result};
use hal::{
    buffer::Usage,
    memory::{Properties, SparseFlags},
    MemoryTypeId,
};

mod dedicated_image;
mod draw_buffers;
mod staged;

pub use dedicated_image::*;
pub use draw_buffers::*;
pub use staged::*;

/// Create a buffer of the given specifications, allocating more device memory.
// TODO: Use a different memory allocator?
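// Illustrative usage (an editor's sketch, not part of this commit): the helper
// below follows the usual gfx-hal flow — create a buffer, query its memory
// requirements, pick a compatible memory type, allocate, then bind. A caller
// inside this crate might use it like this (the values are hypothetical):
//
//     let (buffer, memory) = create_buffer(
//         &mut device,
//         &adapter,
//         Usage::TRANSFER_SRC,
//         Properties::CPU_VISIBLE,
//         1024, // size in bytes
//     )?;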
pub(crate) fn create_buffer(
    device: &mut DeviceT,
    adapter: &Adapter,
    usage: Usage,
    properties: Properties,
    size: u64,
) -> Result<(BufferT, MemoryT)> {
    let mut buffer = unsafe { device.create_buffer(size, usage, SparseFlags::empty()) }
        .context("Error creating buffer")?;

    let requirements = unsafe { device.get_buffer_requirements(&buffer) };
    let memory_type_id = adapter
        .physical_device
        .memory_properties()
        .memory_types
        .iter()
        .enumerate()
        .find(|&(id, memory_type)| {
            requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(properties)
        })
        .map(|(id, _)| MemoryTypeId(id))
        .ok_or(EnvironmentError::NoMemoryTypes)?;

    let memory = unsafe { device.allocate_memory(memory_type_id, requirements.size) }
        .context("Error allocating memory")?;

    unsafe { device.bind_buffer_memory(&memory, 0, &mut buffer) }
        .context("Error binding memory to buffer")?;

    Ok((buffer, memory))
}

/// A buffer that can be modified by the CPU
pub trait ModifiableBuffer: IndexMut<usize> {
    /// Get a handle to the underlying GPU buffer
    fn get_buffer(&mut self) -> &BufferT;

    /// Record the command(s) required to commit changes to this buffer to the
    /// given command buffer.
    fn record_commit_cmds(&mut self, cmd_buffer: &mut CommandBufferT) -> Result<()>;
}

diff --git a/stockton-skeleton/src/buffers/staged.rs b/stockton-skeleton/src/buffers/staged.rs
new file mode 100644
index 0000000..71b5204

//! A buffer that can be written to by the CPU using staging memory

use super::{create_buffer, ModifiableBuffer};
use crate::types::*;

use core::mem::{size_of, ManuallyDrop};
use std::{
    convert::TryInto,
    ops::{Index, IndexMut},
};

use anyhow::{Context, Result};
use hal::{
    buffer::Usage,
    command::BufferCopy,
    memory::{Properties, Segment},
};

/// A GPU buffer that is written to using a staging buffer
pub struct StagedBuffer<'a, T: Sized> {
    /// CPU-visible buffer
    staged_buffer: ManuallyDrop<BufferT>,

    /// CPU-visible memory
    staged_memory: ManuallyDrop<MemoryT>,

    /// GPU Buffer
    buffer: ManuallyDrop<BufferT>,

    /// GPU Memory
    memory: ManuallyDrop<MemoryT>,

    /// Where staged buffer is mapped in CPU memory
    staged_mapped_memory: &'a mut [T],

    /// The highest index in the buffer that's been written to.
    pub highest_used: usize,
}

impl<'a, T: Sized> StagedBuffer<'a, T> {
    /// size is the size in T
    pub fn new(device: &mut DeviceT, adapter: &Adapter, usage: Usage, size: u64) -> Result<Self> {
        // Convert size to bytes
        let size_bytes = size * size_of::<T>() as u64;

        // Get CPU-visible buffer
        let (staged_buffer, mut staged_memory) = create_buffer(
            device,
            adapter,
            Usage::TRANSFER_SRC,
            Properties::CPU_VISIBLE,
            size_bytes,
        )
        .context("Error creating staging buffer")?;

        // Get GPU Buffer
        let (buffer, memory) = create_buffer(
            device,
            adapter,
            Usage::TRANSFER_DST | usage,
            Properties::DEVICE_LOCAL | Properties::COHERENT,
            size_bytes,
        )
        .context("Error creating GPU buffer")?;

        // Map it somewhere and get a slice to that memory
        let staged_mapped_memory = unsafe {
            let ptr = device
                .map_memory(
                    &mut staged_memory,
                    Segment {
                        offset: 0,
                        size: Some(size_bytes),
                    },
                )
                .context("Error mapping staged memory")?;

            std::slice::from_raw_parts_mut(ptr as *mut T, size.try_into()?)
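            // Annotation (editor's note, not part of the patch): `ptr` points
            // at `size_bytes` of CPU-visible memory, so viewing it as a
            // `&mut [T]` of length `size` is in-bounds; the slice stays valid
            // until `unmap_memory` is called in `deactivate` below.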
        };

        Ok(StagedBuffer {
            staged_buffer: ManuallyDrop::new(staged_buffer),
            staged_memory: ManuallyDrop::new(staged_memory),
            buffer: ManuallyDrop::new(buffer),
            memory: ManuallyDrop::new(memory),
            staged_mapped_memory,
            highest_used: 0,
        })
    }

    /// Call this before dropping
    pub(crate) fn deactivate(mut self, device: &mut DeviceT) {
        unsafe {
            device.unmap_memory(&mut self.staged_memory);

            device.free_memory(ManuallyDrop::take(&mut self.staged_memory));
            device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer));

            device.free_memory(ManuallyDrop::take(&mut self.memory));
            device.destroy_buffer(ManuallyDrop::take(&mut self.buffer));
        };
    }
}

impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
    fn get_buffer(&mut self) -> &BufferT {
        &self.buffer
    }

    fn record_commit_cmds(&mut self, buf: &mut CommandBufferT) -> Result<()> {
        unsafe {
            buf.copy_buffer(
                &self.staged_buffer,
                &self.buffer,
                std::iter::once(BufferCopy {
                    src: 0,
                    dst: 0,
                    size: ((self.highest_used + 1) * size_of::<T>()) as u64,
                }),
            );
        }

        Ok(())
    }
}

impl<'a, T: Sized> Index<usize> for StagedBuffer<'a, T> {
    type Output = T;

    fn index(&self, index: usize) -> &Self::Output {
        &self.staged_mapped_memory[index]
    }
}

impl<'a, T: Sized> IndexMut<usize> for StagedBuffer<'a, T> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        if index > self.highest_used {
            self.highest_used = index;
        }
        &mut self.staged_mapped_memory[index]
    }
}

diff --git a/stockton-skeleton/src/builders/mod.rs b/stockton-skeleton/src/builders/mod.rs
new file mode 100644
index 0000000..97b47a0

mod pipeline;
mod renderpass;
mod shader;

pub use pipeline::*;
pub use renderpass::*;
pub use shader::*;

diff --git a/stockton-skeleton/src/builders/pipeline.rs b/stockton-skeleton/src/builders/pipeline.rs
new file mode 100644
index 0000000..f68d9d6

use super::{renderpass::RenderpassSpec, shader::ShaderDesc};
use crate::{error::EnvironmentError, target::SwapchainProperties, types::*};

use std::{mem::ManuallyDrop, ops::Range};

use anyhow::{Context, Result};
use hal::{
    format::Format,
    pso::{
        AttributeDesc, BakedStates, BasePipeline, BlendDesc, BufferIndex, DepthStencilDesc,
        ElemStride, Element, GraphicsPipelineDesc, InputAssemblerDesc, PipelineCreationFlags,
        PrimitiveAssemblerDesc, Rasterizer, Rect, ShaderStageFlags, VertexBufferDesc,
        VertexInputRate, Viewport,
    },
};
use shaderc::Compiler;

pub struct VertexBufferSpec {
    pub attributes: Vec<Format>,
    pub rate: VertexInputRate,
}

impl VertexBufferSpec {
    pub fn as_attribute_desc(&self, binding: BufferIndex) -> Vec<AttributeDesc> {
        let mut v = Vec::with_capacity(self.attributes.len());
        let mut offset = 0;
        for (idx, format) in self.attributes.iter().enumerate() {
            v.push(AttributeDesc {
                location: idx as u32,
                binding,
                element: Element {
                    offset,
                    format: *format,
                },
            });
            offset += get_size(*format);
        }

        v
    }

    pub fn stride(&self) -> ElemStride {
        self.attributes.iter().fold(0, |x, f| x + get_size(*f))
    }
}

fn get_size(f: Format) -> u32 {
    match f {
        Format::Rgb32Sfloat => 4 * 3,
        Format::R32Sint => 4,
        Format::Rg32Sfloat => 4 * 2,
        Format::Rgba32Sfloat => 4 * 4,
        _ => unimplemented!("don't know size of format {:?}", f),
    }
}

#[derive(Debug, Clone)]
pub struct VertexPrimitiveAssemblerSpec {
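    // Annotation (editor's note, not part of the patch): this mirrors the
    // three pieces hal's PrimitiveAssemblerDesc::Vertex needs — one
    // VertexBufferDesc per bound buffer, a flat list of AttributeDescs across
    // all buffers, and the input assembler (primitive topology) state.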
    buffers: Vec<VertexBufferDesc>,
    attributes: Vec<AttributeDesc>,
    input_assembler: InputAssemblerDesc,
}

impl VertexPrimitiveAssemblerSpec {
    pub fn with_buffer(&mut self, bd: VertexBufferSpec) -> &mut Self {
        let idx = self.buffers.len() as u32;
        self.buffers.push(VertexBufferDesc {
            binding: idx,
            stride: bd.stride(),
            rate: bd.rate,
        });

        self.attributes.extend(bd.as_attribute_desc(idx));

        self
    }

    pub fn with_buffers(iad: InputAssemblerDesc, mut bds: Vec<VertexBufferSpec>) -> Self {
        let mut this = VertexPrimitiveAssemblerSpec {
            buffers: vec![],
            attributes: vec![],
            input_assembler: iad,
        };

        for bd in bds.drain(..) {
            this.with_buffer(bd);
        }

        this
    }
}

#[derive(Builder, Debug)]
#[builder(public)]
pub struct PipelineSpec {
    rasterizer: Rasterizer,
    depth_stencil: DepthStencilDesc,
    blender: BlendDesc,
    primitive_assembler: VertexPrimitiveAssemblerSpec,

    shader_vertex: ShaderDesc,
    #[builder(setter(strip_option))]
    shader_fragment: Option<ShaderDesc>,
    #[builder(setter(strip_option), default)]
    shader_geom: Option<ShaderDesc>,
    #[builder(setter(strip_option), default)]
    shader_tesselation: Option<(ShaderDesc, ShaderDesc)>,

    push_constants: Vec<(ShaderStageFlags, Range<u32>)>,

    #[builder(default = "false")]
    dynamic_viewport: bool,
    #[builder(default = "false")]
    dynamic_scissor: bool,

    renderpass: RenderpassSpec,
}

impl PipelineSpec {
    pub fn build<'b, T: Iterator<Item = &'b DescriptorSetLayoutT> + std::fmt::Debug>(
        self,
        device: &mut DeviceT,
        extent: hal::image::Extent,
        _swapchain_properties: &SwapchainProperties,
        set_layouts: T,
    ) -> Result<CompletePipeline> {
        // Renderpass
        let renderpass = self.renderpass.build_renderpass(device)?;

        // Subpass
        let subpass = hal::pass::Subpass {
            index: 0,
            main_pass: &renderpass,
        };

        let mut compiler = Compiler::new().ok_or(EnvironmentError::NoShaderC)?;
        let (vs_module, fs_module, gm_module, ts_module) = {
            (
                self.shader_vertex.compile(&mut compiler, device)?,
                self.shader_fragment
                    .as_ref()
                    .map(|x| x.compile(&mut compiler, device))
                    .transpose()?,
                self.shader_geom
                    .as_ref()
                    .map(|x| x.compile(&mut compiler, device))
                    .transpose()?,
                self.shader_tesselation
                    .as_ref()
                    .map::<Result<_>, _>(|(a, b)| {
                        Ok((
                            a.compile(&mut compiler, device)?,
                            b.compile(&mut compiler, device)?,
                        ))
                    })
                    .transpose()?,
            )
        };

        // Safety: *_module is always populated when shader_* is, so this is safe
        let (vs_entry, fs_entry, gm_entry, ts_entry) = (
            self.shader_vertex.as_entry(&vs_module),
            self.shader_fragment
                .as_ref()
                .map(|x| x.as_entry(fs_module.as_ref().unwrap())),
            self.shader_geom
                .as_ref()
                .map(|x| x.as_entry(gm_module.as_ref().unwrap())),
            self.shader_tesselation.as_ref().map(|(a, b)| {
                (
                    a.as_entry(&ts_module.as_ref().unwrap().0),
                    b.as_entry(&ts_module.as_ref().unwrap().1),
                )
            }),
        );

        // Pipeline layout
        let layout = unsafe {
            device.create_pipeline_layout(set_layouts.into_iter(), self.push_constants.into_iter())
        }
        .context("Error creating pipeline layout")?;

        // Baked states
        let baked_states = BakedStates {
            viewport: match self.dynamic_viewport {
                true => None,
                false => Some(Viewport {
                    rect: extent.rect(),
                    depth: (0.0..1.0),
                }),
            },
            scissor: match self.dynamic_scissor {
                true => None,
                false => Some(extent.rect()),
            },
            blend_constants: None,
            depth_bounds: None,
        };

        // Primitive assembler
        let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
            buffers: self.primitive_assembler.buffers.as_slice(),
            attributes: self.primitive_assembler.attributes.as_slice(),
            input_assembler: self.primitive_assembler.input_assembler,
            vertex: vs_entry,
            tessellation: ts_entry,
            geometry: gm_entry,
        };

        // Pipeline description
        let pipeline_desc = GraphicsPipelineDesc {
            label: Some("stockton"),
            rasterizer: self.rasterizer,
            fragment: fs_entry,
            blender: self.blender,
            depth_stencil: self.depth_stencil,
            multisampling: None,
            baked_states,
            layout: &layout,
            subpass,
            flags: PipelineCreationFlags::empty(),
            parent: BasePipeline::None,
            primitive_assembler,
        };

        // Pipeline
        let pipeline = unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
            .context("Error creating graphics pipeline")?;

        Ok(CompletePipeline {
            renderpass: ManuallyDrop::new(renderpass),
            pipeline_layout: ManuallyDrop::new(layout),
            pipeline: ManuallyDrop::new(pipeline),
            vs_module: ManuallyDrop::new(vs_module),
            fs_module,
            gm_module,
            ts_module,
            render_area: extent.rect(),
        })
    }
}

pub struct CompletePipeline {
    /// Our main render pass
    pub renderpass: ManuallyDrop<RenderPassT>,

    /// The layout of our main graphics pipeline
    pub pipeline_layout: ManuallyDrop<PipelineLayoutT>,

    /// Our main graphics pipeline
    pub pipeline: ManuallyDrop<GraphicsPipelineT>,

    /// The vertex shader module
    pub vs_module: ManuallyDrop<ShaderModuleT>,

    /// The fragment shader module
    pub fs_module: Option<ShaderModuleT>,
    pub gm_module: Option<ShaderModuleT>,
    pub ts_module: Option<(ShaderModuleT, ShaderModuleT)>,

    pub render_area: Rect,
}

impl CompletePipeline {
    /// Deactivate vulkan resources. Use before dropping
    pub fn deactivate(mut self, device: &mut DeviceT) {
        unsafe {
            use core::ptr::read;

            device.destroy_render_pass(ManuallyDrop::into_inner(read(&self.renderpass)));

            device.destroy_shader_module(ManuallyDrop::into_inner(read(&self.vs_module)));
            if let Some(x) = self.fs_module.take() {
                device.destroy_shader_module(x)
            }
            if let Some(x) = self.gm_module.take() {
                device.destroy_shader_module(x)
            }
            if let Some((a, b)) = self.ts_module.take() {
                device.destroy_shader_module(a);
                device.destroy_shader_module(b);
            }

            device.destroy_graphics_pipeline(ManuallyDrop::into_inner(read(&self.pipeline)));

            device.destroy_pipeline_layout(ManuallyDrop::into_inner(read(&self.pipeline_layout)));
        }
    }
}

diff --git a/stockton-skeleton/src/builders/renderpass.rs b/stockton-skeleton/src/builders/renderpass.rs
new file mode 100644
index 0000000..43f0eb2

use crate::types::*;

use std::iter::{empty, once};

use anyhow::Result;
use hal::pass::{Attachment, AttachmentRef, SubpassDesc};

#[derive(Debug, Clone)]
pub struct RenderpassSpec {
    pub colors: Vec<Attachment>,
    pub depth: Option<Attachment>,
    pub inputs: Vec<Attachment>,
    pub resolves: Vec<Attachment>,
    pub preserves: Vec<Attachment>,
}

impl RenderpassSpec {
    pub fn build_renderpass(self, device: &mut DeviceT) -> Result<RenderPassT> {
        let mut next_offset = 0;

        let colors: Vec<AttachmentRef> = self
            .colors
            .iter()
            .enumerate()
            .map(|(i, a)| (next_offset + i, a.layouts.end))
            .collect();
        next_offset = colors.len();

        let depth_stencil = self.depth.as_ref().map(|x| (next_offset, x.layouts.end));
        if depth_stencil.is_some() {
            next_offset += 1;
        }

        let inputs: Vec<AttachmentRef> = self
            .inputs
            .iter()
            .enumerate()
            .map(|(i, a)| (next_offset + i, a.layouts.end))
            .collect();
        next_offset += inputs.len();

        let resolves: Vec<AttachmentRef> = self
            .resolves
            .iter()
            .enumerate()
            .map(|(i, a)| (next_offset + i, a.layouts.end))
            .collect();
        next_offset += resolves.len();

        let preserves: Vec<usize> = self
            .preserves
            .iter()
            .enumerate()
            .map(|(i, _a)| next_offset + i)
            .collect();

        let sp_desc = SubpassDesc {
            colors: colors.as_slice(),
            depth_stencil: depth_stencil.as_ref(),
            inputs: inputs.as_slice(),
            resolves: resolves.as_slice(),
            preserves: preserves.as_slice(),
        };

        let all_attachments = self
            .colors
            .into_iter()
            .chain(self.depth.into_iter())
            .chain(self.inputs.into_iter())
            .chain(self.resolves.into_iter())
            .chain(self.preserves.into_iter());

        Ok(unsafe { device.create_render_pass(all_attachments, once(sp_desc), empty())? })
    }
}

diff --git a/stockton-skeleton/src/builders/shader.rs b/stockton-skeleton/src/builders/shader.rs
new file mode 100644
index 0000000..fde185d

use crate::types::*;

use anyhow::{Context, Result};
use hal::pso::Specialization;
use shaderc::{Compiler, ShaderKind};

#[derive(Debug, Clone)]
pub struct ShaderDesc {
    pub source: String,
    pub entry: String,
    pub kind: ShaderKind,
}

impl ShaderDesc {
    pub fn compile(&self, compiler: &mut Compiler, device: &mut DeviceT) -> Result<ShaderModuleT> {
        let artifact = compiler
            .compile_into_spirv(&self.source, self.kind, "shader", &self.entry, None)
            .context("Shader compilation failed")?;

        // Make into shader module
        Ok(unsafe {
            device
                .create_shader_module(artifact.as_binary())
                .context("Shader module creation failed")?
        })
    }

    pub fn as_entry<'a>(&'a self, module: &'a ShaderModuleT) -> EntryPoint<'a> {
        EntryPoint {
            entry: &self.entry,
            module,
            specialization: Specialization::default(),
        }
    }
}

diff --git a/stockton-skeleton/src/context.rs b/stockton-skeleton/src/context.rs
new file mode 100644
index 0000000..802b8ca

//! Deals with all the Vulkan/HAL details.
//! This relies on draw passes for the actual drawing logic.

use std::{
    mem::ManuallyDrop,
    ptr::read,
    sync::{Arc, RwLock},
};

use anyhow::{Context, Result};
use hal::pool::CommandPoolCreateFlags;
use log::debug;

use winit::window::Window;

use super::{
    draw_passes::{DrawPass, IntoDrawPass},
    queue_negotiator::{DrawQueue, QueueNegotiator},
    target::{SwapchainProperties, TargetChain},
};
use crate::{
    error::{EnvironmentError, LockPoisoned},
    types::*,
};

use stockton_types::Session;

/// Contains all the hal related stuff.
/// In the end, this takes in a depth-sorted list of faces and a map file and renders them.
// TODO: Settings for clear colour, buffer sizes, etc
pub struct RenderingContext {
    // Parents for most of these things
    /// Vulkan Instance
    instance: ManuallyDrop<back::Instance>,

    /// Device we're using
    device: Arc<RwLock<DeviceT>>,

    /// Adapter we're using
    adapter: Adapter,

    /// Swapchain and stuff
    target_chain: ManuallyDrop<TargetChain>,

    // Command pool and buffers
    /// The command pool used for our buffers
    cmd_pool: ManuallyDrop<CommandPoolT>,

    queue_negotiator: QueueNegotiator,

    /// The queue to use for drawing
    queue: Arc<RwLock<QueueT>>,

    pixels_per_point: f32,
}

impl RenderingContext {
    /// Create a new RenderingContext for the given window.
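    // Illustrative call (editor's sketch, not part of this commit; `MyPass`
    // and `MyPassSpec` are hypothetical types implementing DrawPass and
    // IntoDrawPass<MyPass> respectively):
    //
    //     let context = RenderingContext::new::<MyPassSpec, MyPass>(&window)?;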
    pub fn new<IDP: IntoDrawPass<DP>, DP: DrawPass>(window: &Window) -> Result<Self> {
        // Create surface
        let (instance, surface, mut adapters) = unsafe {
            let instance =
                back::Instance::create("stockton", 1).context("Error creating vulkan instance")?;
            let surface = instance
                .create_surface(window)
                .context("Error creating surface")?;
            let adapters = instance.enumerate_adapters();

            (instance, surface, adapters)
        };

        // TODO: Properly figure out which adapter to use
        let adapter = adapters.remove(0);

        // Queue Negotiator
        let mut queue_families_specs = Vec::new();
        let (mut queue_negotiator, surface) = {
            let dq: DrawQueue = DrawQueue { surface };

            let mut qn = QueueNegotiator::default();

            // Draw Queue
            qn.find(&adapter, &dq)
                .context("Couldn't find draw queue family")?;
            queue_families_specs.push(
                qn.family_spec::<DrawQueue>(&adapter.queue_families, 1)
                    .context("Couldn't find draw queue family")?,
            );

            // Auxiliary queues for DP
            queue_families_specs.extend(
                IDP::find_aux_queues(&adapter, &mut qn)
                    .context("Level pass couldn't populate queue negotiator")?,
            );

            (qn, dq.surface)
        };

        // Device & Queue groups
        let (device_lock, queue_groups) = {
            // TODO: This sucks, but hal is restrictive on how we can pass this specific argument.
            let queue_families_specs_real: Vec<_> = queue_families_specs
                .iter()
                .map(|(qf, ns)| (*qf, ns.as_slice()))
                .collect();

            let gpu = unsafe {
                adapter
                    .physical_device
                    .open(queue_families_specs_real.as_slice(), hal::Features::empty())
                    .context("Error opening logical device")?
            };

            (Arc::new(RwLock::new(gpu.device)), gpu.queue_groups)
        };

        queue_negotiator.set_queue_groups(queue_groups);

        // Figure out what our swapchain will look like
        let swapchain_properties = SwapchainProperties::find_best(&adapter, &surface)
            .context("Error getting properties for swapchain")?;

        // Lock device
        let mut device = device_lock
            .write()
            .map_err(|_| LockPoisoned::Device)
            .context("Error getting device lock")?;

        debug!("Detected swapchain properties: {:?}", swapchain_properties);

        // Command pool
        let mut cmd_pool = unsafe {
            device.create_command_pool(
                queue_negotiator
                    .family::<DrawQueue>()
                    .ok_or(EnvironmentError::NoSuitableFamilies)?,
                CommandPoolCreateFlags::RESET_INDIVIDUAL,
            )
        }
        .context("Error creating draw command pool")?;

        // Swapchain and associated resources
        let target_chain = TargetChain::new(
            &mut device,
            &adapter,
            surface,
            &mut cmd_pool,
            swapchain_properties,
        )
        .context("Error creating target chain")?;

        // Unlock device
        drop(device);

        let queue = queue_negotiator
            .get_queue::<DrawQueue>()
            .ok_or(EnvironmentError::NoQueues)
            .context("Error getting draw queue")?;

        Ok(RenderingContext {
            instance: ManuallyDrop::new(instance),

            device: device_lock,
            adapter,

            queue_negotiator,
            queue,

            target_chain: ManuallyDrop::new(target_chain),
            cmd_pool: ManuallyDrop::new(cmd_pool),

            pixels_per_point: window.scale_factor() as f32,
        })
    }

    /// If this function fails the whole context is probably dead
    /// # Safety
    /// The context must not be used while this is being called
    pub unsafe fn handle_surface_change(&mut self) -> Result<()> {
        let mut device = self
            .device
            .write()
            .map_err(|_| LockPoisoned::Device)
            .context("Error getting device lock")?;

        device
            .wait_idle()
            .context("Error waiting for device to become idle")?;

        let surface = ManuallyDrop::into_inner(read(&self.target_chain))
            .deactivate_with_recyling(&mut device, &mut self.cmd_pool);

        let properties = SwapchainProperties::find_best(&self.adapter, &surface)
            .context("Error finding best swapchain properties")?;

        self.target_chain = ManuallyDrop::new(
            TargetChain::new(
                &mut device,
                &self.adapter,
                surface,
                &mut self.cmd_pool,
                properties,
            )
            .context("Error creating target chain")?,
        );
        Ok(())
    }

    /// Draw onto the next frame of the swapchain
    pub fn draw_next_frame<DP: DrawPass>(&mut self, session: &Session, dp: &mut DP) -> Result<()> {
        let mut device = self
            .device
            .write()
            .map_err(|_| LockPoisoned::Device)
            .context("Error getting device lock")?;
        let mut queue = self
            .queue
            .write()
            .map_err(|_| LockPoisoned::Queue)
            .context("Error getting draw queue lock")?;

        // Level draw pass
        self.target_chain
            .do_draw_with(&mut device, &mut queue, dp, session)
            .context("Error preparing next target")?;

        Ok(())
    }

    /// Get a reference to the rendering context's pixels per point.
    pub fn pixels_per_point(&self) -> f32 {
        self.pixels_per_point
    }

    /// Get a reference to the rendering context's device.
    pub fn device(&self) -> &Arc<RwLock<DeviceT>> {
        &self.device
    }

    /// Get a reference to the rendering context's target chain.
    pub fn target_chain(&self) -> &TargetChain {
        &self.target_chain
    }

    /// Get a reference to the rendering context's adapter.
    pub fn adapter(&self) -> &Adapter {
        &self.adapter
    }

    /// Get a mutable reference to the rendering context's queue negotiator.
    pub fn queue_negotiator_mut(&mut self) -> &mut QueueNegotiator {
        &mut self.queue_negotiator
    }
}

impl core::ops::Drop for RenderingContext {
    fn drop(&mut self) {
        {
            self.device.write().unwrap().wait_idle().unwrap();
        }

        unsafe {
            let mut device = self.device.write().unwrap();

            ManuallyDrop::into_inner(read(&self.target_chain)).deactivate(
                &mut self.instance,
                &mut device,
                &mut self.cmd_pool,
            );

            device.destroy_command_pool(ManuallyDrop::into_inner(read(&self.cmd_pool)));
        }
    }
}

diff --git a/stockton-skeleton/src/draw_passes/cons.rs b/stockton-skeleton/src/draw_passes/cons.rs
new file mode 100644
index 0000000..ad94b1c

//! Code for using multiple draw passes in place of just one.
//! Note that this can be extended to an arbitrary number of draw passes.

use super::{DrawPass, IntoDrawPass};
use crate::{context::RenderingContext, queue_negotiator::QueueNegotiator, types::*};
use stockton_types::Session;

use anyhow::Result;

/// One draw pass, then another.
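// Illustrative composition (editor's sketch, not part of this commit): with two
// hypothetical passes `LevelPass` and `UiPass`, a `(LevelPassSpec, UiPassSpec)`
// tuple builds a `ConsDrawPass<LevelPass, UiPass>` that draws the level first,
// then the UI on top:
//
//     let passes: ConsDrawPass<LevelPass, UiPass> =
//         (level_spec, ui_spec).init(&mut session, &mut context)?;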
pub struct ConsDrawPass<A: DrawPass, B: DrawPass> {
    pub a: A,
    pub b: B,
}

impl<A: DrawPass, B: DrawPass> DrawPass for ConsDrawPass<A, B> {
    fn queue_draw(
        &mut self,
        session: &Session,
        img_view: &ImageViewT,
        cmd_buffer: &mut CommandBufferT,
    ) -> Result<()> {
        self.a.queue_draw(session, img_view, cmd_buffer)?;
        self.b.queue_draw(session, img_view, cmd_buffer)?;

        Ok(())
    }

    fn deactivate(self, context: &mut RenderingContext) -> Result<()> {
        self.a.deactivate(context)?;
        self.b.deactivate(context)
    }

    fn handle_surface_change(
        &mut self,
        session: &Session,
        context: &mut RenderingContext,
    ) -> Result<()> {
        self.a.handle_surface_change(session, context)?;
        self.b.handle_surface_change(session, context)
    }
}

impl<A: DrawPass, B: DrawPass, IA: IntoDrawPass<A>, IB: IntoDrawPass<B>>
    IntoDrawPass<ConsDrawPass<A, B>> for (IA, IB)
{
    fn init(
        self,
        session: &mut Session,
        context: &mut RenderingContext,
    ) -> Result<ConsDrawPass<A, B>> {
        Ok(ConsDrawPass {
            a: self.0.init(session, context)?,
            b: self.1.init(session, context)?,
        })
    }

    fn find_aux_queues<'a>(
        adapter: &'a Adapter,
        queue_negotiator: &mut QueueNegotiator,
    ) -> Result<Vec<(&'a QueueFamilyT, Vec<f32>)>> {
        let mut v = IA::find_aux_queues(adapter, queue_negotiator)?;
        v.extend(IB::find_aux_queues(adapter, queue_negotiator)?);
        Ok(v)
    }
}

diff --git a/stockton-skeleton/src/draw_passes/mod.rs b/stockton-skeleton/src/draw_passes/mod.rs
new file mode 100644
index 0000000..a0dbba5

//! Traits and common draw passes.
use super::{queue_negotiator::QueueNegotiator, RenderingContext};
use crate::types::*;
use stockton_types::Session;

use anyhow::Result;

mod cons;
pub mod util;

pub use cons::ConsDrawPass;

/// One of several 'passes' that draw on each frame.
pub trait DrawPass {
    /// Queue any necessary draw commands to cmd_buffer.
    /// This should assume the command buffer isn't in the middle of a renderpass,
    /// and should leave it as such.
    fn queue_draw(
        &mut self,
        session: &Session,
        img_view: &ImageViewT,
        cmd_buffer: &mut CommandBufferT,
    ) -> Result<()>;

    /// Called just after the surface changes (probably a resize).
    fn handle_surface_change(
        &mut self,
        session: &Session,
        context: &mut RenderingContext,
    ) -> Result<()>;

    /// Deactivate any vulkan parts that need to be deactivated
    fn deactivate(self, context: &mut RenderingContext) -> Result<()>;
}

/// A type that can be made into a specific draw pass type.
/// This allows extra data to be used in initialisation without the Renderer needing to worry about it.
pub trait IntoDrawPass<T: DrawPass> {
    fn init(self, session: &mut Session, context: &mut RenderingContext) -> Result<T>;

    /// This function should ask the queue negotiator to find families for any
    /// auxiliary operations this draw pass needs to perform, for example
    /// .find(&TexLoadQueue). It should then call .family_spec for each queue
    /// type negotiated and return the results.
    fn find_aux_queues<'a>(
        adapter: &'a Adapter,
        queue_negotiator: &mut QueueNegotiator,
    ) -> Result<Vec<(&'a QueueFamilyT, Vec<f32>)>>;
}

diff --git a/stockton-skeleton/src/draw_passes/util.rs b/stockton-skeleton/src/draw_passes/util.rs
new file mode 100644
index 0000000..7e82209

//! Utility structs & functions

use anyhow::Result;

/// Keeps a given resource for each swapchain image
pub struct TargetSpecificResources<T> {
    elements: Vec<T>,
    next_idx: usize,
}

impl<T> TargetSpecificResources<T> {
    /// Create a new set of resources, given a function to generate them and the count.
    /// In most cases, count should be swapchain_properties.image_count
    pub fn new<F>(mut generator: F, count: usize) -> Result<Self>
    where
        F: FnMut() -> Result<T>,
    {
        let mut elements = Vec::with_capacity(count);
        for _ in 0..count {
            elements.push(generator()?);
        }

        Ok(TargetSpecificResources {
            elements,
            next_idx: 0,
        })
    }

    /// Get the next resource, wrapping around if necessary.
    pub fn get_next(&mut self) -> &T {
        let el = &self.elements[self.next_idx];
        self.next_idx = (self.next_idx + 1) % self.elements.len();
        el
    }

    /// Dissolve the resource set, returning an iterator over each item.
    /// In most cases, each item will need to be deactivated.
    pub fn dissolve(self) -> impl Iterator<Item = T> {
        self.elements.into_iter()
    }
}

diff --git a/stockton-skeleton/src/error.rs b/stockton-skeleton/src/error.rs
new file mode 100644
index 0000000..1f57892

//! Error types

use thiserror::Error;

#[derive(Error, Debug)]
pub enum LockPoisoned {
    #[error("Device lock poisoned")]
    Device,

    #[error("Map lock poisoned")]
    Map,

    #[error("Queue lock poisoned")]
    Queue,

    #[error("Other lock poisoned")]
    Other,
}

/// Indicates the given property has no acceptable values
#[derive(Debug, Error)]
pub enum EnvironmentError {
    #[error("No supported color format")]
    ColorFormat,

    #[error("No supported depth format")]
    DepthFormat,

    #[error("No supported present mode")]
    PresentMode,

    #[error("No supported composite alpha mode")]
    CompositeAlphaMode,

    #[error("No suitable queue families found")]
    NoSuitableFamilies,

    #[error("No suitable memory types found")]
    NoMemoryTypes,

    #[error("Couldn't use shaderc")]
    NoShaderC,

    #[error("No suitable queues")]
    NoQueues,
}

/// Indicates an issue with the level object being used
#[derive(Debug, Error)]
pub enum LevelError {
    #[error("Referential Integrity broken")]
    BadReference,
}

pub fn full_error_display(err: anyhow::Error) -> String {
    let cont = err
        .chain()
        .skip(1)
        .map(|cause| format!(" caused by: {}", cause))
        .collect::<Vec<String>>()
        .join("\n");

    format!("Error: {}\n{}", err, cont)
}

diff --git a/stockton-skeleton/src/lib.rs b/stockton-skeleton/src/lib.rs
new file mode 100644
index 0000000..03f6d53

#[cfg(feature = "vulkan")]
extern crate gfx_backend_vulkan as back;
extern crate gfx_hal as hal;
extern crate nalgebra_glm as na;

#[macro_use]
extern crate derive_builder;

pub mod buffers;
pub mod builders;
pub mod context;
pub mod draw_passes;
pub mod error;
pub mod queue_negotiator;
mod target;
pub mod texture;
pub mod types;
pub mod utils;

use context::RenderingContext;
use draw_passes::{DrawPass, IntoDrawPass};

use anyhow::{Context, Result};

use stockton_types::Session;
use winit::window::Window;

/// Renders a world to a window when you tell it to.
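// Illustrative frame loop (editor's sketch, not part of this commit; `window`,
// `session`, and `idp` are assumed to exist):
//
//     let mut renderer = Renderer::new(&window, &mut session, idp)?;
//     loop {
//         renderer.render(&session)?;
//     }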
pub struct Renderer<DP> {
    /// All the vulkan stuff
    context: RenderingContext,

    /// The draw pass we're using
    draw_pass: DP,
}

impl<DP: DrawPass> Renderer<DP> {
    /// Create a new Renderer.
    pub fn new<IDP: IntoDrawPass<DP>>(
        window: &Window,
        session: &mut Session,
        idp: IDP,
    ) -> Result<Self> {
        let mut context = RenderingContext::new::<IDP, DP>(window)?;

        // Draw pass
        let draw_pass = idp
            .init(session, &mut context)
            .context("Error initialising draw pass")?;

        Ok(Renderer { context, draw_pass })
    }

    /// Render a single frame of the given session.
    pub fn render(&mut self, session: &Session) -> Result<()> {
        // Try to draw
        if self
            .context
            .draw_next_frame(session, &mut self.draw_pass)
            .is_err()
        {
            // Probably the surface changed
            self.handle_surface_change(session)?;

            // If it fails twice, then error
            self.context.draw_next_frame(session, &mut self.draw_pass)?;
        }

        Ok(())
    }

    pub fn get_aspect_ratio(&self) -> f32 {
        let e = self.context.target_chain().properties().extent;
        e.width as f32 / e.height as f32
    }

    pub fn handle_surface_change(&mut self, session: &Session) -> Result<()> {
        unsafe {
            self.context.handle_surface_change()?;
            self.draw_pass
                .handle_surface_change(session, &mut self.context)?;
        }

        Ok(())
    }

    /// Get a reference to the renderer's context.
    pub fn context(&self) -> &RenderingContext {
        &self.context
    }
}

diff --git a/stockton-skeleton/src/queue_negotiator.rs b/stockton-skeleton/src/queue_negotiator.rs
new file mode 100644
index 0000000..879a935

use crate::{error::EnvironmentError, types::*};

use anyhow::{Error, Result};
use hal::queue::family::QueueFamilyId;
use std::{
    any::TypeId,
    collections::HashMap,
    sync::{Arc, RwLock},
};

type SharedQueue = Arc<RwLock<QueueT>>;

/// Used to find appropriate queue families and share queues from them as needed.
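// Typical flow (editor's annotation, not part of this commit): `find` records
// a suitable family for a selector type, `family_spec` builds the
// (family, priorities) pairs handed to PhysicalDevice::open, and once
// `set_queue_groups` has stored the opened groups, `get_queue` hands out
// queues, sharing already-allocated ones when a family runs dry.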
pub struct QueueNegotiator {
    family_ids: HashMap<TypeId, QueueFamilyId>,
    already_allocated: HashMap<TypeId, (Vec<SharedQueue>, usize)>,
    all: Vec<QueueGroup>,
}

/// Can be used to select a specific queue family
pub trait QueueFamilySelector: 'static {
    /// Check if the given family is suitable
    fn is_suitable(&self, family: &QueueFamilyT) -> bool;
}

impl QueueNegotiator {
    pub fn find<T: QueueFamilySelector>(&mut self, adapter: &Adapter, filter: &T) -> Result<()> {
        if self.family_ids.contains_key(&TypeId::of::<T>()) {
            return Ok(());
        }

        let candidates: Vec<&QueueFamilyT> = adapter
            .queue_families
            .iter()
            .filter(|x| filter.is_suitable(*x))
            .collect();

        if candidates.is_empty() {
            return Err(Error::new(EnvironmentError::NoSuitableFamilies));
        }

        // Prefer using unique families
        let family = match candidates
            .iter()
            .find(|x| !self.family_ids.values().any(|y| *y == x.id()))
        {
            Some(x) => *x,
            None => candidates[0],
        };

        self.family_ids.insert(TypeId::of::<T>(), family.id());

        Ok(())
    }

    pub fn set_queue_groups(&mut self, queue_groups: Vec<QueueGroup>) {
        self.all = queue_groups
    }

    pub fn get_queue<T: QueueFamilySelector>(&mut self) -> Option<Arc<RwLock<QueueT>>> {
        let tid = TypeId::of::<T>();
        let family_id = self.family_ids.get(&tid)?;
        log::debug!("{:?}", self.all);
        log::debug!("{:?}", self.already_allocated);
        match self
            .all
            .iter()
            .position(|x| !x.queues.is_empty() && x.family == *family_id)
        {
            Some(idx) => {
                // At least one remaining queue
                let queue = self.all[idx].queues.pop().unwrap();
                let queue = Arc::new(RwLock::new(queue));

                self.add_to_allocated::<T>(queue.clone());

                Some(queue)
            }
            None => match self.already_allocated.get_mut(&tid) {
                Some((queues, next_share)) => {
                    let queue = (&queues[*next_share]).clone();

                    *next_share += 1;

                    Some(queue)
                }
                None => None,
            },
        }
    }

    pub fn family<T: QueueFamilySelector>(&self) -> Option<QueueFamilyId> {
        self.family_ids.get(&TypeId::of::<T>()).cloned()
    }

    fn add_to_allocated<T: QueueFamilySelector>(&mut self, queue: Arc<RwLock<QueueT>>) {
        let tid = TypeId::of::<T>();
        match self.already_allocated.get_mut(&tid) {
            None => {
                self.already_allocated.insert(tid, (vec![queue], 0));
            }
            Some(x) => {
                x.0.push(queue);
            }
        }
    }

    pub fn family_spec<'a, T: QueueFamilySelector>(
        &self,
        queue_families: &'a [QueueFamilyT],
        count: usize,
    ) -> Option<(&'a QueueFamilyT, Vec<f32>)> {
        let qf_id = self.family::<T>()?;

        let qf = queue_families.iter().find(|x| x.id() == qf_id)?;
        let v = vec![1.0; count];

        Some((qf, v))
    }
}

impl Default for QueueNegotiator {
    fn default() -> Self {
        QueueNegotiator {
            family_ids: HashMap::new(),
            already_allocated: HashMap::new(),
            all: vec![],
        }
    }
}

pub struct DrawQueue {
    pub surface: SurfaceT,
}

impl QueueFamilySelector for DrawQueue {
    fn is_suitable(&self, family: &QueueFamilyT) -> bool {
        self.surface.supports_queue_family(family) && family.queue_type().supports_graphics()
    }
}

diff --git a/stockton-skeleton/src/target.rs b/stockton-skeleton/src/target.rs
new file mode 100644
index 0000000..d0d2380

//! Resources needed for drawing on the screen, including sync objects

use std::{
    borrow::Borrow,
    iter::{empty, once},
    mem::ManuallyDrop,
};

use hal::{
    command::CommandBufferFlags,
    format::{Aspects, ChannelType, Format, ImageFeature},
    image::{
        Access, Extent, FramebufferAttachment, Layout, SubresourceRange, Usage as ImgUsage,
        ViewCapabilities,
    },
    memory::{Barrier, Dependencies},
    pso::{PipelineStage, Viewport},
    window::{CompositeAlphaMode, Extent2D, PresentMode, SwapchainConfig},
};

use super::draw_passes::DrawPass;
use crate::{error::EnvironmentError, types::*};
use anyhow::{Context, Result};
use stockton_types::Session;

#[derive(Debug, Clone)]
pub struct SwapchainProperties {
    pub format: Format,
    pub depth_format: Format,
    pub present_mode: PresentMode,
    pub composite_alpha_mode: CompositeAlphaMode,
    pub viewport: Viewport,
    pub extent: Extent,
    pub image_count: u32,
}

impl SwapchainProperties {
    pub fn find_best(
        adapter: &Adapter,
        surface: &SurfaceT,
    ) -> Result<SwapchainProperties, EnvironmentError> {
        let caps = surface.capabilities(&adapter.physical_device);
        let formats = surface.supported_formats(&adapter.physical_device);

        // Find which settings we'll actually use based on preset preferences
        let format = match formats {
            Some(formats) => formats
                .iter()
                .find(|format| format.base_format().1 == ChannelType::Srgb)
                .copied()
                .ok_or(EnvironmentError::ColorFormat),
            None => Ok(Format::Rgba8Srgb),
        }?;

        let depth_format = *[
            Format::D32SfloatS8Uint,
            Format::D24UnormS8Uint,
            Format::D32Sfloat,
        ]
        .iter()
        .find(|format| {
            format.is_depth()
                && adapter
                    .physical_device
                    .format_properties(Some(**format))
                    .optimal_tiling
                    .contains(ImageFeature::DEPTH_STENCIL_ATTACHMENT)
        })
        .ok_or(EnvironmentError::DepthFormat)?;

        let present_mode = [
            PresentMode::MAILBOX,
            PresentMode::FIFO,
            PresentMode::RELAXED,
            PresentMode::IMMEDIATE,
        ]
        .iter()
        .cloned()
        .find(|pm| caps.present_modes.contains(*pm))
        .ok_or(EnvironmentError::PresentMode)?;

        let composite_alpha_mode = [
            CompositeAlphaMode::OPAQUE,
            CompositeAlphaMode::INHERIT,
            CompositeAlphaMode::PREMULTIPLIED,
            CompositeAlphaMode::POSTMULTIPLIED,
        ]
        .iter()
        .cloned()
        .find(|ca| caps.composite_alpha_modes.contains(*ca))
        .ok_or(EnvironmentError::CompositeAlphaMode)?;

        let extent = caps.extents.end().to_extent(); // Size
        let viewport = Viewport {
            rect: extent.rect(),
            depth: 0.0..1.0,
        };

        Ok(SwapchainProperties {
            format,
            depth_format,
            present_mode,
            composite_alpha_mode,
            extent,
            viewport,
            image_count: if present_mode == PresentMode::MAILBOX {
                ((*caps.image_count.end()) - 1).min((*caps.image_count.start()).max(3))
            } else {
                ((*caps.image_count.end()) - 1).min((*caps.image_count.start()).max(2))
            },
        })
    }

    pub fn framebuffer_attachment(&self) -> FramebufferAttachment {
        FramebufferAttachment {
            usage: ImgUsage::COLOR_ATTACHMENT,
            format: self.format,
            view_caps: ViewCapabilities::empty(),
        }
    }
}

pub struct TargetChain {
    /// Surface we're targeting
    surface: ManuallyDrop<SurfaceT>,
    properties: SwapchainProperties,

    /// Resources tied to each target frame in the swapchain
    targets: Box<[TargetResources]>,

    /// Sync objects used in drawing.
    /// These are separated from the targets because we don't necessarily always match up indexes.
    sync_objects: Box<[SyncObjects]>,

    /// The last set of sync objects used
    last_syncs: usize,

    /// Last image index of the swapchain drawn to
    last_image: u32,
}

impl TargetChain {
    pub fn new(
        device: &mut DeviceT,
        adapter: &Adapter,
        mut surface: SurfaceT,
        cmd_pool: &mut CommandPoolT,
        properties: SwapchainProperties,
    ) -> Result<TargetChain> {
        let caps = surface.capabilities(&adapter.physical_device);

        // Number of frames to pre-render
        let image_count = if properties.present_mode == PresentMode::MAILBOX {
            ((*caps.image_count.end()) - 1).min((*caps.image_count.start()).max(3))
        } else {
            ((*caps.image_count.end()) - 1).min((*caps.image_count.start()).max(2))
        };

        // Swap config
        let swap_config = SwapchainConfig {
            present_mode: properties.present_mode,
            composite_alpha_mode: properties.composite_alpha_mode,
            format: properties.format,
            extent: Extent2D {
                width: properties.extent.width,
                height: properties.extent.height,
            },
            image_count,
            image_layers: 1,
            image_usage: ImgUsage::COLOR_ATTACHMENT,
        };

        let _fat = swap_config.framebuffer_attachment();
        let mut targets: Vec<TargetResources> =
            Vec::with_capacity(swap_config.image_count as usize);
        let mut sync_objects: Vec<SyncObjects> =
            Vec::with_capacity(swap_config.image_count as usize);

        for _ in 0..swap_config.image_count {
            targets.push(
                TargetResources::new(device, cmd_pool, &properties)
                    .context("Error creating target resources")?,
            );

            sync_objects.push(SyncObjects::new(device).context("Error creating sync objects")?);
        }

        // Configure Swapchain
        unsafe {
            surface
                .configure_swapchain(device, swap_config)
                .context("Error configuring swapchain")?;
        }

        Ok(TargetChain {
            surface: ManuallyDrop::new(surface),
            targets: targets.into_boxed_slice(),
            sync_objects: sync_objects.into_boxed_slice(),
            properties,
            last_syncs: (image_count - 1) as usize, // This means the next one to be used is index 0
            last_image: 0,
        })
    }

    pub fn deactivate(
        self,
        instance: &mut InstanceT,
        device: &mut DeviceT,
        cmd_pool: &mut CommandPoolT,
    ) {
        let surface = self.deactivate_with_recyling(device, cmd_pool);

        unsafe {
            instance.destroy_surface(surface);
        }
    }

    pub fn deactivate_with_recyling(
        mut self,
        device: &mut DeviceT,
        cmd_pool: &mut CommandPoolT,
    ) -> SurfaceT {
        use core::ptr::read;
        unsafe {
            for i in 0..self.targets.len() {
                read(&self.targets[i]).deactivate(device, cmd_pool);
            }

            for i in 0..self.sync_objects.len() {
                read(&self.sync_objects[i]).deactivate(device);
            }

            self.surface.unconfigure_swapchain(device);
        }

        unsafe { ManuallyDrop::into_inner(read(&self.surface)) }
    }

    pub fn do_draw_with<'a, DP: DrawPass>(
        &'a mut self,
        device: &mut DeviceT,
        command_queue: &mut QueueT,
        dp: &mut DP,
        session: &Session,
    ) -> Result<()> {
        self.last_syncs = (self.last_syncs + 1) % self.sync_objects.len();
        self.last_image = (self.last_image + 1) % self.targets.len() as u32;

        let syncs = &mut self.sync_objects[self.last_syncs];
        let target = &mut self.targets[self.last_image as usize];

        // Get the image
        let (img, _) = unsafe {
            self.surface
                .acquire_image(core::u64::MAX)
                .context("Error getting image from swapchain")?
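            // Annotation (editor's note, not part of the patch): acquire_image
            // blocks for up to the given timeout (here u64::MAX). When it
            // errors — typically after a resize invalidates the swapchain —
            // the Err bubbles up and Renderer::render responds by calling
            // handle_surface_change and retrying the frame.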
        };

        // Make sure whatever was last using this has finished
        unsafe {
            device
                .wait_for_fence(&syncs.present_complete, core::u64::MAX)
                .context("Error waiting for present_complete")?;
            device
                .reset_fence(&mut syncs.present_complete)
                .context("Error resetting present_complete fence")?;
        };

        // Record commands
        unsafe {
            target.cmd_buffer.begin_primary(CommandBufferFlags::empty());

            target.cmd_buffer.pipeline_barrier(
                PipelineStage::TOP_OF_PIPE..PipelineStage::TOP_OF_PIPE,
                Dependencies::empty(),
                once(Barrier::Image {
                    states: (Access::empty(), Layout::Undefined)
                        ..(Access::empty(), Layout::ColorAttachmentOptimal),
                    target: img.borrow(),
                    range: SubresourceRange {
                        aspects: Aspects::COLOR,
                        level_start: 0,
                        level_count: Some(1),
                        layer_start: 0,
                        layer_count: Some(1),
                    },
                    families: None,
                }),
            );

            dp.queue_draw(session, img.borrow(), &mut target.cmd_buffer)
                .context("Error in draw pass")?;

            target.cmd_buffer.pipeline_barrier(
                PipelineStage::BOTTOM_OF_PIPE..PipelineStage::BOTTOM_OF_PIPE,
                Dependencies::empty(),
                once(Barrier::Image {
                    states: (Access::empty(), Layout::ColorAttachmentOptimal)
                        ..(Access::empty(), Layout::Present),
                    target: img.borrow(),
                    range: SubresourceRange {
                        aspects: Aspects::COLOR,
                        level_start: 0,
                        level_count: Some(1),
                        layer_start: 0,
                        layer_count: Some(1),
                    },
                    families: None,
                }),
            );

            target.cmd_buffer.finish();
        }

        // Submit it
        unsafe {
            command_queue.submit(
                once(&*target.cmd_buffer),
                empty(),
                once(&*syncs.render_complete),
                Some(&mut syncs.present_complete),
            );
            command_queue
                .present(&mut self.surface, img, Some(&mut *syncs.render_complete))
                .context("Error presenting to surface")?;
        };

        Ok(())
    }

    /// Get a reference to the target chain's properties.
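    // Annotation (editor's note, not part of the patch): callers read these
    // properties for things like aspect ratio — e.g. Renderer::get_aspect_ratio
    // computes extent.width as f32 / extent.height as f32 from them.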
    pub fn properties(&self) -> &SwapchainProperties {
        &self.properties
    }
}

/// Resources for a single target frame, including sync objects
pub struct TargetResources {
    /// Command buffer to use when drawing
    pub cmd_buffer: ManuallyDrop<CommandBufferT>,
}

impl TargetResources {
    pub fn new(
        _device: &mut DeviceT,
        cmd_pool: &mut CommandPoolT,
        _properties: &SwapchainProperties,
    ) -> Result<TargetResources> {
        // Command Buffer
        let cmd_buffer = unsafe { cmd_pool.allocate_one(hal::command::Level::Primary) };

        Ok(TargetResources {
            cmd_buffer: ManuallyDrop::new(cmd_buffer),
        })
    }

    pub fn deactivate(self, _device: &mut DeviceT, cmd_pool: &mut CommandPoolT) {
        use core::ptr::read;
        unsafe {
            cmd_pool.free(once(ManuallyDrop::into_inner(read(&self.cmd_buffer))));
        }
    }
}

pub struct SyncObjects {
    /// Triggered when rendering is done
    pub render_complete: ManuallyDrop<SemaphoreT>,

    /// Triggered when the image is on screen
    pub present_complete: ManuallyDrop<FenceT>,
}

impl SyncObjects {
    pub fn new(device: &mut DeviceT) -> Result<Self> {
        // Sync objects
        let render_complete = device
            .create_semaphore()
            .context("Error creating render_complete semaphore")?;
        let present_complete = device
            .create_fence(true)
            .context("Error creating present_complete fence")?;

        Ok(SyncObjects {
            render_complete: ManuallyDrop::new(render_complete),
            present_complete: ManuallyDrop::new(present_complete),
        })
    }

    pub fn deactivate(self, device: &mut DeviceT) {
        use core::ptr::read;

        unsafe {
            device.destroy_semaphore(ManuallyDrop::into_inner(read(&self.render_complete)));
            device.destroy_fence(ManuallyDrop::into_inner(read(&self.present_complete)));
        }
    }
}

diff --git a/stockton-skeleton/src/texture/block.rs b/stockton-skeleton/src/texture/block.rs
new file mode 100644
index 0000000..5ac3a94

use super::{loader::BlockRef, repo::BLOCK_SIZE};
use crate::types::*;

use arrayvec::ArrayVec;
use rendy_memory::{Allocator, Block};
use std::{iter::once, mem::ManuallyDrop};

pub struct TexturesBlock<B: Block<back::Backend>> {
    pub id: BlockRef,
    pub descriptor_set: ManuallyDrop<RDescriptorSet>,
    pub imgs: ArrayVec<[LoadedImage<B>; BLOCK_SIZE]>,
}

impl<B: Block<back::Backend>> TexturesBlock<B> {
    pub fn deactivate<T: Allocator<back::Backend, Block = B>>(
        mut self,
        device: &mut DeviceT,
        tex_alloc: &mut T,
        desc_alloc: &mut DescriptorAllocator,
    ) {
        unsafe {
            use std::ptr::read;

            // Descriptor set
            desc_alloc.free(once(read(&*self.descriptor_set)));

            // Images
            self.imgs
                .drain(..)
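                // Annotation (editor's note, not part of the patch): each
                // LoadedImage::deactivate below frees the view, image, sampler
                // and backing memory; for_each just drives the iterator.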
+ .map(|x| x.deactivate(device, tex_alloc)) + .for_each(|_| {}); + } + } +} + +pub struct LoadedImage<B: Block<back::Backend>> { + pub mem: ManuallyDrop<B>, + pub img: ManuallyDrop<ImageT>, + pub img_view: ManuallyDrop<ImageViewT>, + pub sampler: ManuallyDrop<SamplerT>, + pub row_size: usize, + pub height: u32, + pub width: u32, +} + +impl<B: Block<back::Backend>> LoadedImage<B> { + pub fn deactivate<T: Allocator<back::Backend, Block = B>>( + self, + device: &mut DeviceT, + alloc: &mut T, + ) { + unsafe { + use std::ptr::read; + + device.destroy_image_view(read(&*self.img_view)); + device.destroy_image(read(&*self.img)); + device.destroy_sampler(read(&*self.sampler)); + + alloc.free(device, read(&*self.mem)); + } + } +} diff --git a/stockton-skeleton/src/texture/image.rs b/stockton-skeleton/src/texture/image.rs new file mode 100644 index 0000000..f984b72 --- /dev/null +++ b/stockton-skeleton/src/texture/image.rs @@ -0,0 +1,43 @@ +use super::PIXEL_SIZE; + +use core::ptr::copy_nonoverlapping; +use std::convert::TryInto; + +use image::RgbaImage; + +/// An object that can be loaded as an image into GPU memory +pub trait LoadableImage { + fn width(&self) -> u32; + fn height(&self) -> u32; + + /// # Safety + /// Ensure the ptr is at least width() * PIXEL_SIZE bytes. + unsafe fn copy_row(&self, y: u32, ptr: *mut u8); + + /// # Safety + /// Ensure the ptr is at least row_size * height() * PIXEL_SIZE bytes. + unsafe fn copy_into(&self, ptr: *mut u8, row_size: usize) { + for y in 0..self.height() as usize { + let dest_base: isize = (y * row_size).try_into().unwrap(); + self.copy_row(y as u32, ptr.offset(dest_base)); + } + } +} + +impl LoadableImage for RgbaImage { + fn width(&self) -> u32 { + self.width() + } + + fn height(&self) -> u32 { + self.height() + } + + unsafe fn copy_row(&self, y: u32, ptr: *mut u8) { + let row_size_bytes = self.width() as usize * PIXEL_SIZE; + let raw: &Vec<u8> = self.as_raw(); + let row = &raw[y as usize * row_size_bytes..(y as usize + 1) * row_size_bytes]; + + copy_nonoverlapping(row.as_ptr(), ptr, row.len()); + } +} diff --git a/stockton-skeleton/src/texture/load.rs b/stockton-skeleton/src/texture/load.rs new file mode 100644 index 0000000..1f33ad5 --- /dev/null +++ b/stockton-skeleton/src/texture/load.rs @@ -0,0 +1,191 @@ +use super::{ + block::LoadedImage, block::TexturesBlock, repo::BLOCK_SIZE, resolver::TextureResolver, + staging_buffer::StagingBuffer, LoadableImage, PIXEL_SIZE, +}; +use crate::types::*; + +use anyhow::{Context, Result}; +use arrayvec::ArrayVec; +use hal::{ + format::{Aspects, Format, Swizzle}, + image::{ + Filter, SamplerDesc, SubresourceLayers, SubresourceRange, Usage as ImgUsage, ViewKind, + WrapMode, + }, + memory::SparseFlags, + MemoryTypeId, +}; +use rendy_memory::{Allocator, Block}; +use std::mem::ManuallyDrop; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum TextureLoadError { + #[error("No available resources")] + NoResources, +} + +pub const FORMAT: Format = Format::Rgba8Srgb; +pub const RESOURCES: SubresourceRange = SubresourceRange { + aspects: Aspects::COLOR, + level_start: 0, + level_count: Some(1), + layer_start: 0, + layer_count: Some(1), +}; +pub const LAYERS: SubresourceLayers = SubresourceLayers { + aspects: Aspects::COLOR, + level: 0, + layers: 0..1, +}; + +pub struct TextureLoadConfig<R: TextureResolver> { + pub resolver: R, + pub filter: Filter, + pub wrap_mode: WrapMode, +} + +pub struct QueuedLoad<B: Block<back::Backend>> { + pub fence: FenceT, + pub buf: CommandBufferT, + pub block: TexturesBlock<B>, + pub 
staging_bufs: ArrayVec<[StagingBuffer; BLOCK_SIZE]>, +} + +impl<B: Block<back::Backend>> QueuedLoad<B> { + pub fn dissolve( + self, + ) -> ( + (FenceT, CommandBufferT), + ArrayVec<[StagingBuffer; BLOCK_SIZE]>, + TexturesBlock<B>, + ) { + ((self.fence, self.buf), self.staging_bufs, self.block) + } +} + +pub fn tex_size_info<T: LoadableImage>(img: &T, obcpa: hal::buffer::Offset) -> (usize, usize) { + let initial_row_size = PIXEL_SIZE * img.width() as usize; + let row_alignment_mask = obcpa as u32 - 1; + + let row_size = ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize; + let total_size = (row_size * (img.height() as usize)) as u64; + debug_assert!(row_size as usize >= initial_row_size); + + (row_size, total_size as usize) +} + +pub fn create_image_view<T, I>( + device: &mut DeviceT, + allocator: &mut T, + format: Format, + usage: ImgUsage, + img: &I, +) -> Result<(T::Block, ImageT)> +where + T: Allocator<back::Backend>, + I: LoadableImage, +{ + // Make the image + let mut image_ref = unsafe { + use hal::image::{Kind, Tiling, ViewCapabilities}; + + device.create_image( + Kind::D2(img.width(), img.height(), 1, 1), + 1, + format, + Tiling::Optimal, + usage, + SparseFlags::empty(), + ViewCapabilities::empty(), + ) + } + .context("Error creating image")?; + + // Allocate memory + let (block, _) = unsafe { + let requirements = device.get_image_requirements(&image_ref); + + allocator.alloc(device, requirements.size, requirements.alignment) + } + .context("Error allocating memory")?; + + unsafe { + device + .bind_image_memory(block.memory(), block.range().start, &mut image_ref) + .context("Error binding memory to image")?; + } + + Ok((block, image_ref)) +} + +pub unsafe fn load_image<I: LoadableImage, R: TextureResolver>( + device: &mut DeviceT, + staging_allocator: &mut DynamicAllocator, + tex_allocator: &mut DynamicAllocator, + staging_memory_type: MemoryTypeId, + obcpa: u64, + img_data: I, + config: &TextureLoadConfig<R>, +) -> Result<(StagingBuffer, LoadedImage<DynamicBlock>)> { + // Calculate buffer size + let (row_size, total_size) = tex_size_info(&img_data, obcpa); + + // Create staging buffer + let mut staging_buffer = StagingBuffer::new( + device, + staging_allocator, + total_size as u64, + staging_memory_type, + ) + .context("Error creating staging buffer")?; + + // Write to staging buffer + let mapped_memory = staging_buffer + .map_memory(device) + .context("Error mapping staged memory")?; + + img_data.copy_into(mapped_memory, row_size); + + staging_buffer.unmap_memory(device); + + // Create image + let (img_mem, img) = create_image_view( + device, + tex_allocator, + FORMAT, + ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST, + &img_data, + ) + .context("Error creating image")?; + + // Create image view + let img_view = device + .create_image_view( + &img, + ViewKind::D2, + FORMAT, + Swizzle::NO, + ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST, + RESOURCES, + ) + .context("Error creating image view")?; + + // Create sampler + let sampler = device + .create_sampler(&SamplerDesc::new(config.filter, config.wrap_mode)) + .context("Error creating sampler")?; + + Ok(( + staging_buffer, + LoadedImage { + mem: ManuallyDrop::new(img_mem), + img: ManuallyDrop::new(img), + img_view: ManuallyDrop::new(img_view), + sampler: ManuallyDrop::new(sampler), + row_size, + height: img_data.height(), + width: img_data.width(), + }, + )) +} diff --git a/stockton-skeleton/src/texture/loader.rs b/stockton-skeleton/src/texture/loader.rs new file mode 100644 index 0000000..5c85fd3 --- 
/dev/null +++ b/stockton-skeleton/src/texture/loader.rs @@ -0,0 +1,711 @@ +//! Manages the loading/unloading of textures + +use super::{ + block::{LoadedImage, TexturesBlock}, + load::{load_image, QueuedLoad, TextureLoadConfig, TextureLoadError, LAYERS, RESOURCES}, + repo::BLOCK_SIZE, + resolver::TextureResolver, + PIXEL_SIZE, +}; +use crate::{error::LockPoisoned, types::*, utils::find_memory_type_id}; + +use std::{ + array::IntoIter, + collections::VecDeque, + iter::{empty, once}, + mem::{drop, ManuallyDrop}, + sync::{ + mpsc::{Receiver, Sender}, + Arc, RwLock, + }, + thread::sleep, + time::Duration, +}; + +use anyhow::{Context, Result}; +use arrayvec::ArrayVec; +use hal::{ + command::{BufferImageCopy, CommandBufferFlags}, + format::{Aspects, Format}, + image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange}, + memory::{Barrier, Dependencies, Properties as MemProps, SparseFlags}, + pso::{Descriptor, DescriptorSetWrite, ImageDescriptorType, PipelineStage, ShaderStageFlags}, + queue::family::QueueFamilyId, + MemoryTypeId, +}; +use image::{Rgba, RgbaImage}; +use log::*; +use rendy_descriptor::{DescriptorRanges, DescriptorSetLayoutBinding, DescriptorType}; +use rendy_memory::DynamicConfig; +use thiserror::Error; + +/// The number of command buffers to have in flight simultaneously. +pub const NUM_SIMULTANEOUS_CMDS: usize = 2; + +/// A reference to a texture of the current map +pub type BlockRef = usize; + +/// Manages the loading/unloading of textures +/// This is expected to load the textures, then send the loaded blocks back +pub struct TextureLoader<R: TextureResolver> { + /// Blocks for which commands have been queued and are done loading once the fence is triggered. + commands_queued: ArrayVec<[QueuedLoad<DynamicBlock>; NUM_SIMULTANEOUS_CMDS]>, + + /// The command buffers used and a fence to go with them + buffers: VecDeque<(FenceT, CommandBufferT)>, + + /// The command pool buffers were allocated from + pool: ManuallyDrop<CommandPoolT>, + + /// The GPU we're submitting to + device: Arc<RwLock<DeviceT>>, + + /// The command queue being used + queue: Arc<RwLock<QueueT>>, + + /// The memory allocator being used for textures + tex_allocator: ManuallyDrop<DynamicAllocator>, + + /// The memory allocator for staging memory + staging_allocator: ManuallyDrop<DynamicAllocator>, + + /// Allocator for descriptor sets + descriptor_allocator: ManuallyDrop<DescriptorAllocator>, + + ds_layout: Arc<RwLock<DescriptorSetLayoutT>>, + + /// Type ID for staging memory + staging_memory_type: MemoryTypeId, + + /// From adapter, used for determining alignment + optimal_buffer_copy_pitch_alignment: hal::buffer::Offset, + + /// Configuration for how to find and load textures + config: TextureLoadConfig<R>, + + /// The channel requests come in. + /// Requests should reference a texture **block**, for example textures 8..16 is block 1. + request_channel: Receiver<LoaderRequest>, + + /// The channel blocks are returned to. 
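+    /// Loaded blocks come back in the order their fences signal, which is not
+    /// necessarily the order they were requested in.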
+ return_channel: Sender<TexturesBlock<DynamicBlock>>, + + /// A filler image for descriptors that aren't needed but still need to be written to + blank_image: ManuallyDrop<LoadedImage<DynamicBlock>>, +} + +#[derive(Error, Debug)] +pub enum TextureLoaderError { + #[error("Couldn't find a suitable memory type")] + NoMemoryTypes, +} + +impl<R: TextureResolver> TextureLoader<R> { + pub fn loop_until_exit(mut self) -> Result<TextureLoaderRemains> { + debug!("TextureLoader starting main loop"); + let mut res = Ok(false); + while res.is_ok() { + res = self.main(); + if let Ok(true) = res { + break; + } + + sleep(Duration::from_secs(0)); + } + + match res { + Ok(true) => { + debug!("Starting to deactivate TextureLoader"); + + Ok(self.deactivate()) + } + Err(r) => Err(r.context("Error in TextureLoader loop")), + _ => unreachable!(), + } + } + fn main(&mut self) -> Result<bool> { + let mut device = self + .device + .write() + .map_err(|_| LockPoisoned::Device) + .context("Error getting device lock")?; + // Check for blocks that are finished, then send them back + let mut i = 0; + while i < self.commands_queued.len() { + let signalled = unsafe { device.get_fence_status(&self.commands_queued[i].fence) } + .context("Error checking fence status")?; + + if signalled { + let (assets, mut staging_bufs, block) = self.commands_queued.remove(i).dissolve(); + debug!("Load finished for texture block {:?}", block.id); + + // Destroy staging buffers + for buf in staging_bufs.drain(..) { + buf.deactivate(&mut device, &mut self.staging_allocator); + } + + self.buffers.push_back(assets); + self.return_channel + .send(block) + .context("Error returning texture block")?; + } else { + i += 1; + } + } + + drop(device); + + // Check for messages to start loading blocks + let req_iter: Vec<_> = self.request_channel.try_iter().collect(); + for to_load in req_iter { + match to_load { + LoaderRequest::Load(to_load) => { + // Attempt to load given block + debug!("Attempting to queue load for texture block {:?}", to_load); + + let result = unsafe { self.attempt_queue_load(to_load) }; + match result { + Ok(queued_load) => self.commands_queued.push(queued_load), + Err(x) => match x.downcast_ref::<TextureLoadError>() { + Some(TextureLoadError::NoResources) => { + debug!("No resources, trying again later"); + } + _ => return Err(x).context("Error queuing texture load"), + }, + } + } + LoaderRequest::End => return Ok(true), + } + } + + Ok(false) + } + + pub fn new( + adapter: &Adapter, + device_lock: Arc<RwLock<DeviceT>>, + (family, queue_lock): (QueueFamilyId, Arc<RwLock<QueueT>>), + ds_layout: Arc<RwLock<DescriptorSetLayoutT>>, + (request_channel, return_channel): ( + Receiver<LoaderRequest>, + Sender<TexturesBlock<DynamicBlock>>, + ), + config: TextureLoadConfig<R>, + ) -> Result<Self> { + let mut device = device_lock + .write() + .map_err(|_| LockPoisoned::Device) + .context("Error getting device lock")?; + let device_props = adapter.physical_device.properties(); + + let type_mask = unsafe { + use hal::image::{Kind, Tiling, Usage, ViewCapabilities}; + + // We create an empty image with the same format as used for textures + // this is to get the type_mask required, which will stay the same for + // all colour images of the same tiling. (certain memory flags excluded). + + // Size and alignment don't necessarily stay the same, so we're forced to + // guess at the alignment for our allocator. 
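+            // e.g. a type_mask of 0b0110 means only memory types 1 and 2 are
+            // usable; find_memory_type_id then picks the first of those that
+            // also has the properties we ask for (DEVICE_LOCAL, below).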
+ + // TODO: Way to tune these options + let img = device + .create_image( + Kind::D2(16, 16, 1, 1), + 1, + Format::Rgba8Srgb, + Tiling::Optimal, + Usage::SAMPLED, + SparseFlags::empty(), + ViewCapabilities::empty(), + ) + .context("Error creating test image to get buffer settings")?; + + let type_mask = device.get_image_requirements(&img).type_mask; + + device.destroy_image(img); + + type_mask + }; + + debug!("Using type mask {:?}", type_mask); + + // Tex Allocator + let mut tex_allocator = { + let props = MemProps::DEVICE_LOCAL; + + DynamicAllocator::new( + find_memory_type_id(adapter, type_mask, props) + .ok_or(TextureLoaderError::NoMemoryTypes) + .context("Couldn't create tex memory allocator")?, + props, + DynamicConfig { + block_size_granularity: 4 * 32 * 32, // 32x32 image + max_chunk_size: u64::pow(2, 63), + min_device_allocation: 4 * 32 * 32, + }, + device_props.limits.non_coherent_atom_size as u64, + ) + }; + + let (staging_memory_type, mut staging_allocator) = { + let props = MemProps::CPU_VISIBLE | MemProps::COHERENT; + let t = find_memory_type_id(adapter, u32::MAX, props) + .ok_or(TextureLoaderError::NoMemoryTypes) + .context("Couldn't create staging memory allocator")?; + ( + t, + DynamicAllocator::new( + t, + props, + DynamicConfig { + block_size_granularity: 4 * 32 * 32, // 32x32 image + max_chunk_size: u64::pow(2, 63), + min_device_allocation: 4 * 32 * 32, + }, + device_props.limits.non_coherent_atom_size as u64, + ), + ) + }; + + // Pool + let mut pool = unsafe { + use hal::pool::CommandPoolCreateFlags; + + device.create_command_pool(family, CommandPoolCreateFlags::RESET_INDIVIDUAL) + } + .context("Error creating command pool")?; + + // Command buffers and fences + debug!("Creating resources..."); + let mut buffers = { + let mut data = VecDeque::with_capacity(NUM_SIMULTANEOUS_CMDS); + + for _ in 0..NUM_SIMULTANEOUS_CMDS { + unsafe { + data.push_back(( + device.create_fence(false).context("Error creating fence")?, + pool.allocate_one(hal::command::Level::Primary), + )); + }; + } + + data + }; + + let optimal_buffer_copy_pitch_alignment = + device_props.limits.optimal_buffer_copy_pitch_alignment; + + let blank_image = unsafe { + Self::get_blank_image( + &mut device, + &mut buffers[0].1, + &queue_lock, + (&mut staging_allocator, &mut tex_allocator), + staging_memory_type, + optimal_buffer_copy_pitch_alignment, + &config, + ) + } + .context("Error creating blank image")?; + + drop(device); + + Ok(TextureLoader { + commands_queued: ArrayVec::new(), + buffers, + pool: ManuallyDrop::new(pool), + device: device_lock, + queue: queue_lock, + ds_layout, + + tex_allocator: ManuallyDrop::new(tex_allocator), + staging_allocator: ManuallyDrop::new(staging_allocator), + descriptor_allocator: ManuallyDrop::new(DescriptorAllocator::new()), + + staging_memory_type, + optimal_buffer_copy_pitch_alignment, + + request_channel, + return_channel, + config, + blank_image: ManuallyDrop::new(blank_image), + }) + } + + unsafe fn attempt_queue_load(&mut self, block_ref: usize) -> Result<QueuedLoad<DynamicBlock>> { + let mut device = self + .device + .write() + .map_err(|_| LockPoisoned::Device) + .context("Error getting device lock")?; + + // Get assets to use + let (mut fence, mut buf) = self + .buffers + .pop_front() + .ok_or(TextureLoadError::NoResources) + .context("Error getting resources to use")?; + + // Create descriptor set + let mut descriptor_set = { + let mut v: ArrayVec<[RDescriptorSet; 1]> = ArrayVec::new(); + self.descriptor_allocator + .allocate( + &device, + &*self + .ds_layout + 
.read() + .map_err(|_| LockPoisoned::Other) + .context("Error reading descriptor set layout")?, + DescriptorRanges::from_bindings(&[ + DescriptorSetLayoutBinding { + binding: 0, + ty: DescriptorType::Image { + ty: ImageDescriptorType::Sampled { + with_sampler: false, + }, + }, + count: BLOCK_SIZE, + stage_flags: ShaderStageFlags::FRAGMENT, + immutable_samplers: false, + }, + DescriptorSetLayoutBinding { + binding: 1, + ty: DescriptorType::Sampler, + count: BLOCK_SIZE, + stage_flags: ShaderStageFlags::FRAGMENT, + immutable_samplers: false, + }, + ]), + 1, + &mut v, + ) + .context("Error creating descriptor set")?; + + v.pop().unwrap() + }; + + // Get a command buffer + buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT); + + let mut imgs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new(); + let mut staging_bufs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new(); + + // For each texture in block + for tex_idx in (block_ref * BLOCK_SIZE)..(block_ref + 1) * BLOCK_SIZE { + // Resolve texture + let img_data = self.config.resolver.resolve(tex_idx as u32); + if img_data.is_none() { + // Write a blank descriptor + device.write_descriptor_set(DescriptorSetWrite { + set: descriptor_set.raw_mut(), + binding: 0, + array_offset: tex_idx % BLOCK_SIZE, + descriptors: once(Descriptor::Image( + &*self.blank_image.img_view, + Layout::ShaderReadOnlyOptimal, + )), + }); + device.write_descriptor_set(DescriptorSetWrite { + set: descriptor_set.raw_mut(), + binding: 1, + array_offset: tex_idx % BLOCK_SIZE, + descriptors: once(Descriptor::Sampler(&*self.blank_image.sampler)), + }); + + continue; + } + + let img_data = img_data.unwrap(); + + let array_offset = tex_idx % BLOCK_SIZE; + + let (staging_buffer, img) = load_image( + &mut device, + &mut self.staging_allocator, + &mut self.tex_allocator, + self.staging_memory_type, + self.optimal_buffer_copy_pitch_alignment, + img_data, + &self.config, + )?; + + // Write to descriptor set + { + device.write_descriptor_set(DescriptorSetWrite { + set: descriptor_set.raw_mut(), + binding: 0, + array_offset, + descriptors: once(Descriptor::Image( + &*img.img_view, + Layout::ShaderReadOnlyOptimal, + )), + }); + device.write_descriptor_set(DescriptorSetWrite { + set: descriptor_set.raw_mut(), + binding: 1, + array_offset, + descriptors: once(Descriptor::Sampler(&*img.sampler)), + }); + } + + imgs.push(img); + + staging_bufs.push(staging_buffer); + } + + // Add start pipeline barrier + buf.pipeline_barrier( + PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER, + Dependencies::empty(), + imgs.iter().map(|li| Barrier::Image { + states: (Access::empty(), Layout::Undefined) + ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal), + target: &*li.img, + families: None, + range: SubresourceRange { + aspects: Aspects::COLOR, + level_start: 0, + level_count: None, + layer_start: 0, + layer_count: None, + }, + }), + ); + + // Record copy commands + for (li, sb) in imgs.iter().zip(staging_bufs.iter()) { + buf.copy_buffer_to_image( + &*sb.buf, + &*li.img, + Layout::TransferDstOptimal, + once(BufferImageCopy { + buffer_offset: 0, + buffer_width: (li.row_size / super::PIXEL_SIZE) as u32, + buffer_height: li.height, + image_layers: SubresourceLayers { + aspects: Aspects::COLOR, + level: 0, + layers: 0..1, + }, + image_offset: Offset { x: 0, y: 0, z: 0 }, + image_extent: gfx_hal::image::Extent { + width: li.width, + height: li.height, + depth: 1, + }, + }), + ); + } + buf.pipeline_barrier( + PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE, + Dependencies::empty(), + imgs.iter().map(|li| 
Barrier::Image { + states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal) + ..(Access::empty(), Layout::ShaderReadOnlyOptimal), + target: &*li.img, + families: None, + range: RESOURCES, + }), + ); + + buf.finish(); + + // Submit command buffer + { + let mut queue = self.queue.write().map_err(|_| LockPoisoned::Queue)?; + + queue.submit(IntoIter::new([&buf]), empty(), empty(), Some(&mut fence)); + } + + Ok(QueuedLoad { + staging_bufs, + fence, + buf, + block: TexturesBlock { + id: block_ref, + imgs, + descriptor_set: ManuallyDrop::new(descriptor_set), + }, + }) + } + + unsafe fn get_blank_image( + device: &mut DeviceT, + buf: &mut CommandBufferT, + queue_lock: &Arc<RwLock<QueueT>>, + (staging_allocator, tex_allocator): (&mut DynamicAllocator, &mut DynamicAllocator), + staging_memory_type: MemoryTypeId, + obcpa: u64, + config: &TextureLoadConfig<R>, + ) -> Result<LoadedImage<DynamicBlock>> { + let img_data = RgbaImage::from_pixel(1, 1, Rgba([255, 0, 255, 255])); + + let height = img_data.height(); + let width = img_data.width(); + let row_alignment_mask = obcpa as u32 - 1; + let initial_row_size = PIXEL_SIZE * img_data.width() as usize; + let row_size = + ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize; + + let (staging_buffer, img) = load_image( + device, + staging_allocator, + tex_allocator, + staging_memory_type, + obcpa, + img_data, + config, + )?; + + buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT); + + buf.pipeline_barrier( + PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER, + Dependencies::empty(), + once(Barrier::Image { + states: (Access::empty(), Layout::Undefined) + ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal), + target: &*img.img, + families: None, + range: SubresourceRange { + aspects: Aspects::COLOR, + level_start: 0, + level_count: None, + layer_start: 0, + layer_count: None, + }, + }), + ); + buf.copy_buffer_to_image( + &*staging_buffer.buf, + &*img.img, + Layout::TransferDstOptimal, + once(BufferImageCopy { + buffer_offset: 0, + buffer_width: (row_size / super::PIXEL_SIZE) as u32, + buffer_height: height, + image_layers: LAYERS, + image_offset: Offset { x: 0, y: 0, z: 0 }, + image_extent: Extent { + width, + height, + depth: 1, + }, + }), + ); + + buf.pipeline_barrier( + PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE, + Dependencies::empty(), + once(Barrier::Image { + states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal) + ..(Access::empty(), Layout::ShaderReadOnlyOptimal), + target: &*img.img, + families: None, + range: RESOURCES, + }), + ); + buf.finish(); + + let mut fence = device.create_fence(false).context("Error creating fence")?; + + { + let mut queue = queue_lock.write().map_err(|_| LockPoisoned::Queue)?; + + queue.submit( + IntoIter::new([buf as &CommandBufferT]), + empty(), + empty(), + Some(&mut fence), + ); + } + + device + .wait_for_fence(&fence, std::u64::MAX) + .context("Error waiting for copy")?; + + device.destroy_fence(fence); + + staging_buffer.deactivate(device, staging_allocator); + + Ok(img) + } + + /// Safely destroy all the vulkan stuff in this instance + /// Note that this returns the memory allocators, from which should be freed any TextureBlocks + /// All in-progress things are sent to return_channel. 
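+    /// Any loads still in flight are waited on and their blocks sent through
+    /// return_channel before the remains are handed back.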
+    fn deactivate(mut self) -> TextureLoaderRemains {
+        use std::ptr::read;
+
+        let mut device = self.device.write().unwrap();
+
+        unsafe {
+            // Wait for any currently queued loads to be done
+            while !self.commands_queued.is_empty() {
+                let mut i = 0;
+                while i < self.commands_queued.len() {
+                    let signalled = device
+                        .get_fence_status(&self.commands_queued[i].fence)
+                        .expect("Device lost by TextureManager");
+
+                    if signalled {
+                        // Destroy finished ones
+                        let (assets, mut staging_bufs, block) =
+                            self.commands_queued.remove(i).dissolve();
+
+                        device.destroy_fence(assets.0);
+                        // Command buffer will be freed when we reset the command pool
+                        // Destroy staging buffers
+                        for buf in staging_bufs.drain(..) {
+                            buf.deactivate(&mut device, &mut self.staging_allocator);
+                        }
+
+                        self.return_channel
+                            .send(block)
+                            .expect("Sending through return channel failed");
+                    } else {
+                        i += 1;
+                    }
+                }
+
+                sleep(Duration::from_secs(0));
+            }
+
+            // Destroy blank image
+            read(&*self.blank_image).deactivate(&mut device, &mut *self.tex_allocator);
+
+            // Destroy fences
+            self.buffers
+                .drain(..)
+                .for_each(|(f, _)| device.destroy_fence(f));
+
+            // Free command pool
+            self.pool.reset(true);
+            device.destroy_command_pool(read(&*self.pool));
+
+            debug!("Done deactivating TextureLoader");
+
+            TextureLoaderRemains {
+                tex_allocator: ManuallyDrop::new(read(&*self.tex_allocator)),
+                descriptor_allocator: ManuallyDrop::new(read(&*self.descriptor_allocator)),
+            }
+        }
+    }
+}
+
+pub struct TextureLoaderRemains {
+    pub tex_allocator: ManuallyDrop<DynamicAllocator>,
+    pub descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
+}
+
+pub enum LoaderRequest {
+    /// Load the given block
+    Load(BlockRef),
+
+    /// Stop looping and deactivate
+    End,
+}
diff --git a/stockton-skeleton/src/texture/mod.rs b/stockton-skeleton/src/texture/mod.rs
new file mode 100644
index 0000000..aef1b03
--- /dev/null
+++ b/stockton-skeleton/src/texture/mod.rs
@@ -0,0 +1,18 @@
+//! Everything related to loading textures into GPU memory
+
+mod block;
+mod image;
+mod load;
+mod loader;
+mod repo;
+pub mod resolver;
+mod staging_buffer;
+
+pub use self::block::TexturesBlock;
+pub use self::image::LoadableImage;
+pub use self::load::TextureLoadConfig;
+pub use self::loader::BlockRef;
+pub use self::repo::{TexLoadQueue, TextureRepo};
+
+/// The size of each pixel in an image
+pub const PIXEL_SIZE: usize = std::mem::size_of::<u8>() * 4;
diff --git a/stockton-skeleton/src/texture/repo.rs b/stockton-skeleton/src/texture/repo.rs
new file mode 100644
index 0000000..341d355
--- /dev/null
+++ b/stockton-skeleton/src/texture/repo.rs
@@ -0,0 +1,199 @@
+use super::{
+    block::TexturesBlock,
+    load::TextureLoadConfig,
+    loader::{BlockRef, LoaderRequest, TextureLoader, TextureLoaderRemains, NUM_SIMULTANEOUS_CMDS},
+    resolver::TextureResolver,
+};
+use crate::error::LockPoisoned;
+use crate::queue_negotiator::QueueFamilySelector;
+use crate::types::*;
+
+use std::{
+    array::IntoIter,
+    collections::HashMap,
+    iter::empty,
+    mem::ManuallyDrop,
+    sync::{
+        mpsc::{channel, Receiver, Sender},
+        Arc, RwLock, RwLockReadGuard,
+    },
+    thread::JoinHandle,
+};
+
+use anyhow::{Context, Result};
+use hal::{
+    pso::{DescriptorSetLayoutBinding, DescriptorType, ImageDescriptorType, ShaderStageFlags},
+    queue::family::QueueFamilyId,
+};
+use log::debug;
+
+/// The number of textures in one 'block'.
+/// The textures of the loaded file are divided into blocks of this size.
+/// Whenever a texture is needed, the whole block it's in is loaded.
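+/// For example, with BLOCK_SIZE = 8, texture index 10 lives in block 10 / 8 = 1,
+/// at array offset 10 % 8 = 2 within that block's descriptor set.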
+pub const BLOCK_SIZE: usize = 8; + +pub struct TextureRepo { + joiner: ManuallyDrop<JoinHandle<Result<TextureLoaderRemains>>>, + ds_layout: Arc<RwLock<DescriptorSetLayoutT>>, + req_send: Sender<LoaderRequest>, + resp_recv: Receiver<TexturesBlock<DynamicBlock>>, + blocks: HashMap<BlockRef, Option<TexturesBlock<DynamicBlock>>>, +} + +impl TextureRepo { + pub fn new<R: 'static + TextureResolver + Send + Sync>( + device_lock: Arc<RwLock<DeviceT>>, + family: QueueFamilyId, + queue: Arc<RwLock<QueueT>>, + adapter: &Adapter, + config: TextureLoadConfig<R>, + ) -> Result<Self> { + // Create Channels + let (req_send, req_recv) = channel(); + let (resp_send, resp_recv) = channel(); + let device = device_lock + .write() + .map_err(|_| LockPoisoned::Device) + .context("Error getting device lock")?; + + // Create descriptor set layout + let ds_lock = Arc::new(RwLock::new( + unsafe { + device.create_descriptor_set_layout( + IntoIter::new([ + DescriptorSetLayoutBinding { + binding: 0, + ty: DescriptorType::Image { + ty: ImageDescriptorType::Sampled { + with_sampler: false, + }, + }, + count: BLOCK_SIZE, + stage_flags: ShaderStageFlags::FRAGMENT, + immutable_samplers: false, + }, + DescriptorSetLayoutBinding { + binding: 1, + ty: DescriptorType::Sampler, + count: BLOCK_SIZE, + stage_flags: ShaderStageFlags::FRAGMENT, + immutable_samplers: false, + }, + ]), + empty(), + ) + } + .context("Error creating descriptor set layout")?, + )); + + debug!("Created descriptor set layout {:?}", ds_lock); + + drop(device); + + let joiner = { + let loader = TextureLoader::new( + adapter, + device_lock.clone(), + (family, queue), + ds_lock.clone(), + (req_recv, resp_send), + config, + )?; + + std::thread::spawn(move || loader.loop_until_exit()) + }; + + Ok(TextureRepo { + joiner: ManuallyDrop::new(joiner), + ds_layout: ds_lock, + blocks: HashMap::new(), + req_send, + resp_recv, + }) + } + + pub fn get_ds_layout(&self) -> Result<RwLockReadGuard<DescriptorSetLayoutT>> { + self.ds_layout + .read() + .map_err(|_| LockPoisoned::Other) + .context("Error locking descriptor set layout") + } + + pub fn queue_load(&mut self, block_id: BlockRef) -> Result<()> { + if self.blocks.contains_key(&block_id) { + return Ok(()); + } + + self.force_queue_load(block_id) + } + + pub fn force_queue_load(&mut self, block_id: BlockRef) -> Result<()> { + self.req_send + .send(LoaderRequest::Load(block_id)) + .context("Error queuing texture block load")?; + + self.blocks.insert(block_id, None); + + Ok(()) + } + + pub fn attempt_get_descriptor_set(&mut self, block_id: BlockRef) -> Option<&DescriptorSetT> { + self.blocks + .get(&block_id) + .and_then(|opt| opt.as_ref().map(|z| z.descriptor_set.raw())) + } + + pub fn process_responses(&mut self) { + let resp_iter: Vec<_> = self.resp_recv.try_iter().collect(); + for resp in resp_iter { + debug!("Got block {:?} back from loader", resp.id); + self.blocks.insert(resp.id, Some(resp)); + } + } + + pub fn deactivate(mut self, device_lock: &Arc<RwLock<DeviceT>>) { + unsafe { + use std::ptr::read; + + // Join the loader thread + self.req_send.send(LoaderRequest::End).unwrap(); + let mut remains = read(&*self.joiner).join().unwrap().unwrap(); + + // Process any ones that just got done loading + self.process_responses(); + + // Only now can we lock device without deadlocking + let mut device = device_lock.write().unwrap(); + + // Return all the texture memory and descriptors. 
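+            // A `None` here is a block that was requested but never came back;
+            // nothing was allocated for it, so it can simply be dropped.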
+ for (_, v) in self.blocks.drain() { + if let Some(block) = v { + block.deactivate( + &mut device, + &mut *remains.tex_allocator, + &mut remains.descriptor_allocator, + ); + } + } + + // Dispose of both allocators + read(&*remains.tex_allocator).dispose(); + read(&*remains.descriptor_allocator).dispose(&device); + + // Deactivate DS Layout + let ds_layout = Arc::try_unwrap(self.ds_layout) + .unwrap() + .into_inner() + .unwrap(); + device.destroy_descriptor_set_layout(ds_layout); + } + } +} + +pub struct TexLoadQueue; + +impl QueueFamilySelector for TexLoadQueue { + fn is_suitable(&self, family: &QueueFamilyT) -> bool { + family.queue_type().supports_transfer() && family.max_queues() >= NUM_SIMULTANEOUS_CMDS + } +} diff --git a/stockton-skeleton/src/texture/resolver.rs b/stockton-skeleton/src/texture/resolver.rs new file mode 100644 index 0000000..f66b724 --- /dev/null +++ b/stockton-skeleton/src/texture/resolver.rs @@ -0,0 +1,55 @@ +//! Resolves a texture in a BSP File to an image + +use crate::texture::image::LoadableImage; +use stockton_levels::{parts::IsTexture, prelude::HasTextures}; + +use std::{ + path::Path, + sync::{Arc, RwLock}, +}; + +use image::{io::Reader, RgbaImage}; + +/// An object that can be used to resolve a texture from a BSP File +pub trait TextureResolver { + type Image: LoadableImage; + + /// Get the given texture, or None if it's corrupt/not there. + fn resolve(&mut self, texture_id: u32) -> Option<Self::Image>; +} + +/// A basic filesystem resolver which gets the texture name from any HasTextures Object. +pub struct FsResolver<'a, T: HasTextures> { + path: &'a Path, + map_lock: Arc<RwLock<T>>, +} + +impl<'a, T: HasTextures> FsResolver<'a, T> { + pub fn new(path: &'a Path, map_lock: Arc<RwLock<T>>) -> Self { + FsResolver { path, map_lock } + } +} + +impl<'a, T: HasTextures> TextureResolver for FsResolver<'a, T> { + type Image = RgbaImage; + + fn resolve(&mut self, tex: u32) -> Option<Self::Image> { + let map = self.map_lock.read().unwrap(); + let tex = map.get_texture(tex)?; + let path = self.path.join(&tex.name()); + + // drop(tex); + // drop(map); + + if let Ok(file) = Reader::open(path) { + if let Ok(guessed) = file.with_guessed_format() { + if let Ok(decoded) = guessed.decode() { + return Some(decoded.into_rgba8()); + } + } + } + + log::warn!("Couldn't resolve texture {:?}", tex.name()); + None + } +} diff --git a/stockton-skeleton/src/texture/staging_buffer.rs b/stockton-skeleton/src/texture/staging_buffer.rs new file mode 100644 index 0000000..8d2ae17 --- /dev/null +++ b/stockton-skeleton/src/texture/staging_buffer.rs @@ -0,0 +1,59 @@ +#![allow(mutable_transmutes)] +use crate::types::*; + +use std::mem::ManuallyDrop; + +use anyhow::{Context, Result}; +use hal::{device::MapError, memory::SparseFlags, MemoryTypeId}; +use rendy_memory::{Allocator, Block}; + +pub struct StagingBuffer { + pub buf: ManuallyDrop<BufferT>, + pub mem: ManuallyDrop<DynamicBlock>, +} + +impl StagingBuffer { + const USAGE: hal::buffer::Usage = hal::buffer::Usage::TRANSFER_SRC; + + pub fn new( + device: &mut DeviceT, + alloc: &mut DynamicAllocator, + size: u64, + _memory_type_id: MemoryTypeId, + ) -> Result<StagingBuffer> { + let mut buffer = unsafe { device.create_buffer(size, Self::USAGE, SparseFlags::empty()) } + .context("Error creating buffer")?; + + let requirements = unsafe { device.get_buffer_requirements(&buffer) }; + + let (memory, _) = alloc + .alloc(device, requirements.size, requirements.alignment) + .context("Error allocating staging memory")?; + + unsafe { 
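+            // Bind the block's memory to the buffer. Note that this binds at
+            // offset 0 of the block's backing Memory, not at memory.range().start,
+            // so it relies on the block starting at the front of its chunk.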
device.bind_buffer_memory(memory.memory(), 0, &mut buffer) } + .context("Error binding staging memory to buffer")?; + + Ok(StagingBuffer { + buf: ManuallyDrop::new(buffer), + mem: ManuallyDrop::new(memory), + }) + } + + pub unsafe fn map_memory(&mut self, device: &mut DeviceT) -> Result<*mut u8, MapError> { + let range = 0..(self.mem.range().end - self.mem.range().start); + Ok(self.mem.map(device, range)?.ptr().as_mut()) + } + pub unsafe fn unmap_memory(&mut self, device: &mut DeviceT) { + self.mem.unmap(device); + } + + pub fn deactivate(self, device: &mut DeviceT, alloc: &mut DynamicAllocator) { + unsafe { + use std::ptr::read; + // Destroy buffer + device.destroy_buffer(read(&*self.buf)); + // Free memory + alloc.free(device, read(&*self.mem)); + } + } +} diff --git a/stockton-skeleton/src/types.rs b/stockton-skeleton/src/types.rs new file mode 100644 index 0000000..03c6e37 --- /dev/null +++ b/stockton-skeleton/src/types.rs @@ -0,0 +1,35 @@ +//! Convenience module to reference types that are stored in the backend's enum + +pub use hal::prelude::*; + +pub type InstanceT = <back::Backend as hal::Backend>::Instance; +pub type DeviceT = <back::Backend as hal::Backend>::Device; +pub type BufferT = <back::Backend as hal::Backend>::Buffer; +pub type MemoryT = <back::Backend as hal::Backend>::Memory; +pub type SurfaceT = <back::Backend as hal::Backend>::Surface; +pub type SemaphoreT = <back::Backend as hal::Backend>::Semaphore; +pub type FenceT = <back::Backend as hal::Backend>::Fence; +pub type CommandPoolT = <back::Backend as hal::Backend>::CommandPool; +pub type CommandBufferT = <back::Backend as hal::Backend>::CommandBuffer; +pub type QueueT = <back::Backend as hal::Backend>::Queue; +pub type QueueFamilyT = <back::Backend as hal::Backend>::QueueFamily; +pub type DescriptorSetLayoutT = <back::Backend as hal::Backend>::DescriptorSetLayout; +pub type DescriptorSetT = <back::Backend as hal::Backend>::DescriptorSet; +pub type PipelineLayoutT = <back::Backend as hal::Backend>::PipelineLayout; +pub type GraphicsPipelineT = <back::Backend as hal::Backend>::GraphicsPipeline; +pub type ShaderModuleT = <back::Backend as hal::Backend>::ShaderModule; +pub type SamplerT = <back::Backend as hal::Backend>::Sampler; +pub type ImageT = <back::Backend as hal::Backend>::Image; +pub type ImageViewT = <back::Backend as hal::Backend>::ImageView; +pub type FramebufferT = <back::Backend as hal::Backend>::Framebuffer; +pub type RenderPassT = <back::Backend as hal::Backend>::RenderPass; + +pub type Adapter = hal::adapter::Adapter<back::Backend>; +pub type EntryPoint<'a> = hal::pso::EntryPoint<'a, back::Backend>; +pub type QueueGroup = hal::queue::QueueGroup<back::Backend>; + +pub type DescriptorAllocator = rendy_descriptor::DescriptorAllocator<back::Backend>; +pub type DynamicAllocator = rendy_memory::DynamicAllocator<back::Backend>; +pub type DynamicBlock = rendy_memory::DynamicBlock<back::Backend>; + +pub type RDescriptorSet = rendy_descriptor::DescriptorSet<back::Backend>; diff --git a/stockton-skeleton/src/utils.rs b/stockton-skeleton/src/utils.rs new file mode 100644 index 0000000..152ba10 --- /dev/null +++ b/stockton-skeleton/src/utils.rs @@ -0,0 +1,19 @@ +use crate::types::*; +use hal::{memory::Properties as MemProperties, MemoryTypeId}; + +pub fn find_memory_type_id( + adapter: &Adapter, + type_mask: u32, + props: MemProperties, +) -> Option<MemoryTypeId> { + adapter + .physical_device + .memory_properties() + .memory_types + .iter() + .enumerate() + .find(|&(id, memory_type)| { + type_mask & (1 << id) != 0 
&& memory_type.properties.contains(props)
+        })
+        .map(|(id, _)| MemoryTypeId(id))
+}
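+
+// Example (sketch): how TextureLoader picks its staging memory type with this
+// helper; `adapter` is assumed to be the hal adapter in use:
+//
+//     use hal::memory::Properties as MemProperties;
+//     let staging_type = find_memory_type_id(
+//         &adapter,
+//         u32::MAX, // any memory type is acceptable
+//         MemProperties::CPU_VISIBLE | MemProperties::COHERENT,
+//     ).expect("no suitable memory type");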