author     tcmal <me@aria.rip>  2024-08-25 17:44:22 +0100
committer  tcmal <me@aria.rip>  2024-08-25 17:44:22 +0100
commit     d7d0f0277c437004ed476393542da39c84c5cf9f (patch)
tree       65c369e00fc1416c74019fde4456b0bf90d3067c /rendy-descriptor
parent     10b3d4ac59e826b31d2114999e31893390acfb9c (diff)
chore(render): update hal and fix some errors
Diffstat (limited to 'rendy-descriptor')
-rw-r--r--  rendy-descriptor/Cargo.toml        17
-rw-r--r--  rendy-descriptor/src/allocator.rs  396
-rw-r--r--  rendy-descriptor/src/lib.rs        4
-rw-r--r--  rendy-descriptor/src/ranges.rs     278
4 files changed, 695 insertions, 0 deletions
diff --git a/rendy-descriptor/Cargo.toml b/rendy-descriptor/Cargo.toml
new file mode 100644
index 0000000..8e4a4b9
--- /dev/null
+++ b/rendy-descriptor/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "rendy-descriptor"
+version = "0.5.1"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+edition = "2018"
+repository = "https://github.com/amethyst/rendy"
+license = "MIT OR Apache-2.0"
+documentation = "https://docs.rs/rendy-descriptor"
+keywords = ["graphics", "gfx-hal", "rendy"]
+categories = ["rendering"]
+description = "Rendy's descriptor allocator"
+
+[dependencies]
+gfx-hal = "^0.8"
+log = "0.4.11"
+relevant = { version = "0.4.2", features = ["log"] }
+smallvec = "1.5.1"
diff --git a/rendy-descriptor/src/allocator.rs b/rendy-descriptor/src/allocator.rs
new file mode 100644
index 0000000..36e059b
--- /dev/null
+++ b/rendy-descriptor/src/allocator.rs
@@ -0,0 +1,396 @@
+use {
+ crate::ranges::*,
+ gfx_hal::{
+ device::{Device, OutOfMemory},
+ pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags},
+ Backend,
+ },
+ smallvec::{smallvec, SmallVec},
+ std::{
+ collections::{HashMap, VecDeque},
+ ops::Deref,
+ },
+};
+
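+// Bounds on the number of sets in a single pool; `new_pool_size` grows pool
+// sizes geometrically between these limits.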
+const MIN_SETS: u32 = 64;
+const MAX_SETS: u32 = 512;
+
+/// Descriptor set from allocator.
+#[derive(Debug)]
+pub struct DescriptorSet<B: Backend> {
+ raw: B::DescriptorSet,
+ pool: u64,
+ ranges: DescriptorRanges,
+}
+
+impl<B> DescriptorSet<B>
+where
+ B: Backend,
+{
+ /// Get raw set
+ pub fn raw(&self) -> &B::DescriptorSet {
+ &self.raw
+ }
+
+    /// Get mutable reference to the raw set.
+    /// The underlying set must not be replaced.
+ pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet {
+ &mut self.raw
+ }
+}
+
+impl<B> Deref for DescriptorSet<B>
+where
+ B: Backend,
+{
+ type Target = B::DescriptorSet;
+
+ fn deref(&self) -> &B::DescriptorSet {
+ &self.raw
+ }
+}
+
+#[derive(Debug)]
+struct Allocation<B: Backend> {
+ sets: SmallVec<[B::DescriptorSet; 1]>,
+ pools: Vec<u64>,
+}
+
+#[derive(Debug)]
+struct DescriptorPool<B: Backend> {
+ raw: B::DescriptorPool,
+ size: u32,
+
+ // Number of free sets left.
+ free: u32,
+
+    // Number of sets freed. Freed sets are not recycled; the pool is destroyed
+    // once all of its sets have been freed (invariant: freed + free <= size).
+    freed: u32,
+}
+
+unsafe fn allocate_from_pool<B: Backend>(
+ raw: &mut B::DescriptorPool,
+ layout: &B::DescriptorSetLayout,
+ count: u32,
+ allocation: &mut SmallVec<[B::DescriptorSet; 1]>,
+) -> Result<(), OutOfMemory> {
+ let sets_were = allocation.len();
+ raw.allocate(std::iter::repeat(layout).take(count as usize), allocation)
+ .map_err(|err| match err {
+ AllocationError::OutOfMemory(x) => x,
+ err => {
+ // We check pool for free descriptors and sets before calling this function,
+ // so it can't be exhausted.
+ // And it can't be fragmented either according to spec
+ //
+ // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo
+ //
+ // """
+ // Additionally, if all sets allocated from the pool since it was created or most recently reset
+ // use the same number of descriptors (of each type) and the requested allocation also
+ // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure
+ // """
+ unreachable!("Unexpected error: {:?}", err);
+ }
+ })?;
+ assert_eq!(allocation.len(), sets_were + count as usize);
+ Ok(())
+}
+
+#[derive(Debug)]
+struct DescriptorBucket<B: Backend> {
+ pools_offset: u64,
+ pools: VecDeque<DescriptorPool<B>>,
+ total: u64,
+}
+
+impl<B> DescriptorBucket<B>
+where
+ B: Backend,
+{
+ fn new() -> Self {
+ DescriptorBucket {
+ pools_offset: 0,
+ pools: VecDeque::new(),
+ total: 0,
+ }
+ }
+
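+    // Worked example: with `total` = 100 sets already allocated and `count` = 10
+    // requested, the next pool holds max(64, 10, 100).next_power_of_two() = 128 sets.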
+ fn new_pool_size(&self, count: u32) -> u32 {
+ MIN_SETS // at least MIN_SETS
+ .max(count) // at least enough for allocation
+ .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS
+ .next_power_of_two() // rounded up to nearest 2^N
+ }
+
+ unsafe fn dispose(mut self, device: &B::Device) {
+ if self.total > 0 {
+ log::error!("Not all descriptor sets were deallocated");
+ }
+
+        while let Some(pool) = self.pools.pop_front() {
+            assert!(pool.freed + pool.free <= pool.size);
+            if pool.freed + pool.free < pool.size {
+                // Leak the pool rather than destroy it while its sets may
+                // still be in use.
+                log::error!(
+                    "Descriptor pool is still in use during allocator disposal. {:?}",
+                    pool
+                );
+            } else {
+                log::trace!("Destroying used up descriptor pool");
+                device.destroy_descriptor_pool(pool.raw);
+                self.pools_offset += 1;
+            }
+        }
+ }
+
+ unsafe fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_ranges: DescriptorRanges,
+ mut count: u32,
+ allocation: &mut Allocation<B>,
+ ) -> Result<(), OutOfMemory> {
+ if count == 0 {
+ return Ok(());
+ }
+
+ for (index, pool) in self.pools.iter_mut().enumerate().rev() {
+ if pool.free == 0 {
+ continue;
+ }
+
+ let allocate = pool.free.min(count);
+            log::trace!("Allocate {} from existing pool", allocate);
+ allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
+ allocation.pools.extend(
+ std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
+ );
+ count -= allocate;
+ pool.free -= allocate;
+ self.total += allocate as u64;
+
+ if count == 0 {
+ return Ok(());
+ }
+ }
+
+ while count > 0 {
+ let size = self.new_pool_size(count);
+ let pool_ranges = layout_ranges * size;
+ log::trace!(
+ "Create new pool with {} sets and {:?} descriptors",
+ size,
+ pool_ranges,
+ );
+ let raw = device.create_descriptor_pool(
+ size as usize,
+ pool_ranges.into_iter(),
+ DescriptorPoolCreateFlags::empty(),
+ )?;
+ let allocate = size.min(count);
+
+ self.pools.push_back(DescriptorPool {
+ raw,
+ size,
+ free: size,
+ freed: 0,
+ });
+ let index = self.pools.len() - 1;
+ let pool = self.pools.back_mut().unwrap();
+
+ allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
+ allocation.pools.extend(
+ std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
+ );
+
+ count -= allocate;
+ pool.free -= allocate;
+ self.total += allocate as u64;
+ }
+
+ Ok(())
+ }
+
+ unsafe fn free(&mut self, sets: impl IntoIterator<Item = B::DescriptorSet>, pool: u64) {
+ let pool = &mut self.pools[(pool - self.pools_offset) as usize];
+ let freed = sets.into_iter().count() as u32;
+ pool.freed += freed;
+ self.total -= freed as u64;
+ log::trace!("Freed {} from descriptor bucket", freed);
+ }
+
+ unsafe fn cleanup(&mut self, device: &B::Device) {
+ while let Some(pool) = self.pools.pop_front() {
+ if pool.freed < pool.size {
+ self.pools.push_front(pool);
+ break;
+ }
+ log::trace!("Destroying used up descriptor pool");
+ device.destroy_descriptor_pool(pool.raw);
+ self.pools_offset += 1;
+ }
+ }
+}
+
+/// Descriptor allocator.
+/// Can be used to allocate descriptor sets for any layout.
+#[derive(Debug)]
+pub struct DescriptorAllocator<B: Backend> {
+ buckets: HashMap<DescriptorRanges, DescriptorBucket<B>>,
+ allocation: Allocation<B>,
+ relevant: relevant::Relevant,
+ total: u64,
+}
+
+impl<B> DescriptorAllocator<B>
+where
+ B: Backend,
+{
+ /// Create new allocator instance.
+ pub fn new() -> Self {
+ DescriptorAllocator {
+ buckets: HashMap::new(),
+ allocation: Allocation {
+ sets: SmallVec::new(),
+ pools: Vec::new(),
+ },
+ relevant: relevant::Relevant,
+ total: 0,
+ }
+ }
+
+ /// Destroy allocator instance.
+ /// All sets allocated from this allocator become invalid.
+ pub unsafe fn dispose(mut self, device: &B::Device) {
+ self.buckets
+ .drain()
+ .for_each(|(_, bucket)| bucket.dispose(device));
+ self.relevant.dispose();
+ }
+
+ /// Allocate descriptor set with specified layout.
+    /// `DescriptorRanges` must match the descriptor counts of the layout.
+    /// `DescriptorRanges` can be constructed [from bindings] that were used
+    /// to create the layout instance.
+    ///
+    /// [from bindings]: DescriptorRanges::from_bindings
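+    ///
+    /// A minimal usage sketch (hypothetical `device`, `layout` and `bindings`;
+    /// `ignore`d because it needs a live gfx-hal backend):
+    ///
+    /// ```ignore
+    /// let mut allocator = DescriptorAllocator::<B>::new();
+    /// let ranges = DescriptorRanges::from_bindings(&bindings);
+    /// let mut sets = Vec::new();
+    /// unsafe {
+    ///     allocator.allocate(&device, &layout, ranges, 4, &mut sets).unwrap();
+    ///     // ... record and submit work using `sets` ...
+    ///     allocator.free(sets.drain(..));
+    ///     allocator.cleanup(&device);
+    ///     allocator.dispose(&device);
+    /// }
+    /// ```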
+ pub unsafe fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_ranges: DescriptorRanges,
+ count: u32,
+ extend: &mut impl Extend<DescriptorSet<B>>,
+ ) -> Result<(), OutOfMemory> {
+ if count == 0 {
+ return Ok(());
+ }
+
+ log::trace!(
+ "Allocating {} sets with layout {:?} @ {:?}",
+ count,
+ layout,
+ layout_ranges
+ );
+
+ let bucket = self
+ .buckets
+ .entry(layout_ranges)
+ .or_insert_with(DescriptorBucket::new);
+ match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) {
+ Ok(()) => {
+ extend.extend(
+ Iterator::zip(
+ self.allocation.pools.drain(..),
+ self.allocation.sets.drain(..),
+ )
+ .map(|(pool, set)| DescriptorSet {
+ raw: set,
+ ranges: layout_ranges,
+ pool,
+ }),
+ );
+ Ok(())
+ }
+ Err(err) => {
+                // Free sets allocated so far.
+                let mut last = None;
+                for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() {
+                    match last {
+                        Some(last_pool) if last_pool == pool => {
+                            // Same pool, keep scanning.
+                        }
+                        Some(last_pool) => {
+                            // Free the contiguous range of sets from one pool in
+                            // one go, then start tracking the new pool.
+                            bucket.free(self.allocation.sets.drain(index + 1..), last_pool);
+                            last = Some(pool);
+                        }
+                        None => last = Some(pool),
+                    }
+                }
+
+ if let Some(last) = last {
+ bucket.free(self.allocation.sets.drain(0..), last);
+ }
+
+ Err(err)
+ }
+ }
+ }
+
+ /// Free descriptor sets.
+ ///
+ /// # Safety
+ ///
+    /// None of the descriptor sets may be referenced by any pending command buffer.
+    /// Any command buffer that references at least one of these sets moves to the
+    /// invalid state.
+ pub unsafe fn free(&mut self, all_sets: impl IntoIterator<Item = DescriptorSet<B>>) {
+ let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None;
+
+        // Collect contiguous runs of sets that share a pool and free each run in one call.
+ for set in all_sets {
+ match &mut free {
+ slot @ None => {
+ slot.replace((set.ranges, set.pool, smallvec![set.raw]));
+ }
+ Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => {
+ raw_sets.push(set.raw);
+ }
+ Some((ranges, pool, raw_sets)) => {
+ let bucket = self
+ .buckets
+ .get_mut(ranges)
+ .expect("Set should be allocated from this allocator");
+ debug_assert!(bucket.total >= raw_sets.len() as u64);
+
+ bucket.free(raw_sets.drain(..), *pool);
+ *pool = set.pool;
+ *ranges = set.ranges;
+ raw_sets.push(set.raw);
+ }
+ }
+ }
+
+ if let Some((ranges, pool, raw_sets)) = free {
+ let bucket = self
+ .buckets
+ .get_mut(&ranges)
+ .expect("Set should be allocated from this allocator");
+ debug_assert!(bucket.total >= raw_sets.len() as u64);
+
+ bucket.free(raw_sets, pool);
+ }
+ }
+
+    /// Destroy pools whose sets have all been freed so their memory can be reused.
+ pub unsafe fn cleanup(&mut self, device: &B::Device) {
+ self.buckets
+ .values_mut()
+ .for_each(|bucket| bucket.cleanup(device));
+ }
+}
diff --git a/rendy-descriptor/src/lib.rs b/rendy-descriptor/src/lib.rs
new file mode 100644
index 0000000..18d5e0e
--- /dev/null
+++ b/rendy-descriptor/src/lib.rs
@@ -0,0 +1,4 @@
+mod allocator;
+mod ranges;
+
+pub use {allocator::*, ranges::*};
diff --git a/rendy-descriptor/src/ranges.rs b/rendy-descriptor/src/ranges.rs
new file mode 100644
index 0000000..d936ab1
--- /dev/null
+++ b/rendy-descriptor/src/ranges.rs
@@ -0,0 +1,278 @@
+use std::{
+ cmp::Ordering,
+ ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign},
+};
+
+pub use gfx_hal::pso::{
+ BufferDescriptorFormat, BufferDescriptorType, DescriptorRangeDesc, DescriptorSetLayoutBinding,
+ DescriptorType, ImageDescriptorType,
+};
+
+const DESCRIPTOR_TYPES_COUNT: usize = 11;
+
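+// The order here must match `descriptor_type_index` below; `DescriptorRangesIter`
+// relies on it to map a count back to its `DescriptorType`.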
+const DESCRIPTOR_TYPES: [DescriptorType; DESCRIPTOR_TYPES_COUNT] = [
+ DescriptorType::Sampler,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled { with_sampler: true },
+ },
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Storage { read_only: false },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Texel,
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Texel,
+ },
+ DescriptorType::InputAttachment,
+];
+
+fn descriptor_type_index(ty: &DescriptorType) -> usize {
+ match ty {
+ DescriptorType::Sampler => 0,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled { with_sampler: true },
+ } => 1,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ } => 2,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Storage { read_only: _ },
+ } => 3,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ } => 4,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ } => 5,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ } => 6,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ } => 7,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format: BufferDescriptorFormat::Texel,
+ } => 8,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Texel,
+ } => 9,
+ DescriptorType::InputAttachment => 10,
+ }
+}
+
+/// Number of descriptors per type.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct DescriptorRanges {
+ counts: [u32; DESCRIPTOR_TYPES_COUNT],
+}
+
+impl DescriptorRanges {
+ /// Create new instance without descriptors.
+ pub fn zero() -> Self {
+ DescriptorRanges {
+ counts: [0; DESCRIPTOR_TYPES_COUNT],
+ }
+ }
+
+ /// Add a single layout binding.
+    /// Useful when starting from `DescriptorRanges::zero()`.
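+    ///
+    /// A tiny sketch (`binding` is a hypothetical `DescriptorSetLayoutBinding`):
+    ///
+    /// ```ignore
+    /// let mut ranges = DescriptorRanges::zero();
+    /// ranges.add_binding(binding);
+    /// ```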
+ pub fn add_binding(&mut self, binding: DescriptorSetLayoutBinding) {
+ self.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+    /// Iterate through ranges, yielding descriptor types and their amounts.
+ pub fn iter(&self) -> DescriptorRangesIter<'_> {
+ DescriptorRangesIter {
+ counts: &self.counts,
+ index: 0,
+ }
+ }
+
+ /// Read as slice.
+ pub fn counts(&self) -> &[u32] {
+ &self.counts
+ }
+
+ /// Read or write as slice.
+ pub fn counts_mut(&mut self) -> &mut [u32] {
+ &mut self.counts
+ }
+
+ /// Calculate ranges from bindings.
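+    ///
+    /// A small sketch (the binding values are made up for illustration):
+    ///
+    /// ```ignore
+    /// use gfx_hal::pso::{
+    ///     BufferDescriptorFormat, BufferDescriptorType, DescriptorSetLayoutBinding,
+    ///     DescriptorType, ShaderStageFlags,
+    /// };
+    ///
+    /// let bindings = [DescriptorSetLayoutBinding {
+    ///     binding: 0,
+    ///     ty: DescriptorType::Buffer {
+    ///         ty: BufferDescriptorType::Uniform,
+    ///         format: BufferDescriptorFormat::Structured { dynamic_offset: false },
+    ///     },
+    ///     count: 1,
+    ///     stage_flags: ShaderStageFlags::VERTEX,
+    ///     immutable_samplers: false,
+    /// }];
+    /// let ranges = DescriptorRanges::from_bindings(&bindings);
+    /// // One plain uniform-buffer descriptor was counted.
+    /// assert_eq!(ranges.counts().iter().sum::<u32>(), 1);
+    /// ```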
+ pub fn from_bindings(bindings: &[DescriptorSetLayoutBinding]) -> Self {
+ let mut descs = Self::zero();
+
+ for binding in bindings {
+ descs.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+ descs
+ }
+
+ /// Calculate ranges from bindings, specified with an iterator.
+ pub fn from_binding_iter<I>(bindings: I) -> Self
+ where
+ I: Iterator<Item = DescriptorSetLayoutBinding>,
+ {
+ let mut descs = Self::zero();
+
+ for binding in bindings {
+ descs.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+ descs
+ }
+}
+
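+// Component-wise partial order: `a <= b` only if every descriptor count in `a`
+// is <= the matching count in `b`; mixed results compare as `None`.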
+impl PartialOrd for DescriptorRanges {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ let mut ord = self.counts[0].partial_cmp(&other.counts[0])?;
+ for i in 1..DESCRIPTOR_TYPES_COUNT {
+ match (ord, self.counts[i].partial_cmp(&other.counts[i])?) {
+ (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => {
+ return None;
+ }
+ (Ordering::Equal, new) => ord = new,
+ _ => (),
+ }
+ }
+ Some(ord)
+ }
+}
+
+impl Add for DescriptorRanges {
+ type Output = Self;
+ fn add(mut self, rhs: Self) -> Self {
+ self += rhs;
+ self
+ }
+}
+
+impl AddAssign for DescriptorRanges {
+ fn add_assign(&mut self, rhs: Self) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] += rhs.counts[i];
+ }
+ }
+}
+
+impl Sub for DescriptorRanges {
+ type Output = Self;
+ fn sub(mut self, rhs: Self) -> Self {
+ self -= rhs;
+ self
+ }
+}
+
+impl SubAssign for DescriptorRanges {
+ fn sub_assign(&mut self, rhs: Self) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] -= rhs.counts[i];
+ }
+ }
+}
+
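+// Scaling by a set count is how `DescriptorBucket::allocate` sizes a new pool:
+// `layout_ranges * size` requests enough descriptors of each type for `size` sets.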
+impl Mul<u32> for DescriptorRanges {
+ type Output = Self;
+ fn mul(mut self, rhs: u32) -> Self {
+ self *= rhs;
+ self
+ }
+}
+
+impl MulAssign<u32> for DescriptorRanges {
+ fn mul_assign(&mut self, rhs: u32) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] *= rhs;
+ }
+ }
+}
+
+impl<'a> IntoIterator for &'a DescriptorRanges {
+ type Item = DescriptorRangeDesc;
+ type IntoIter = DescriptorRangesIter<'a>;
+
+ fn into_iter(self) -> DescriptorRangesIter<'a> {
+ self.iter()
+ }
+}
+
+/// Iterator over descriptor ranges.
+pub struct DescriptorRangesIter<'a> {
+ counts: &'a [u32; DESCRIPTOR_TYPES_COUNT],
+ index: u8,
+}
+
+impl<'a> Iterator for DescriptorRangesIter<'a> {
+ type Item = DescriptorRangeDesc;
+
+ fn next(&mut self) -> Option<DescriptorRangeDesc> {
+ loop {
+ let index = self.index as usize;
+ if index >= DESCRIPTOR_TYPES_COUNT {
+ return None;
+ } else {
+ self.index += 1;
+ if self.counts[index] > 0 {
+ return Some(DescriptorRangeDesc {
+ count: self.counts[index] as usize,
+ ty: DESCRIPTOR_TYPES[index],
+ });
+ }
+ }
+ }
+ }
+}