-rw-r--r--  Cargo.toml | 6
-rw-r--r--  README.md | 1
-rw-r--r--  rendy-descriptor/Cargo.toml | 17
-rw-r--r--  rendy-descriptor/src/allocator.rs | 396
-rw-r--r--  rendy-descriptor/src/lib.rs | 4
-rw-r--r--  rendy-descriptor/src/ranges.rs | 278
-rw-r--r--  rendy-memory/Cargo.toml | 27
-rw-r--r--  rendy-memory/src/allocator/dedicated.rs | 218
-rw-r--r--  rendy-memory/src/allocator/dynamic.rs | 716
-rw-r--r--  rendy-memory/src/allocator/linear.rs | 363
-rw-r--r--  rendy-memory/src/allocator/mod.rs | 50
-rw-r--r--  rendy-memory/src/block.rs | 36
-rw-r--r--  rendy-memory/src/heaps/heap.rs | 49
-rw-r--r--  rendy-memory/src/heaps/memory_type.rs | 158
-rw-r--r--  rendy-memory/src/heaps/mod.rs | 327
-rw-r--r--  rendy-memory/src/lib.rs | 31
-rw-r--r--  rendy-memory/src/mapping/mod.rs | 345
-rw-r--r--  rendy-memory/src/mapping/range.rs | 85
-rw-r--r--  rendy-memory/src/mapping/write.rs | 73
-rw-r--r--  rendy-memory/src/memory.rs | 98
-rw-r--r--  rendy-memory/src/usage.rs | 210
-rw-r--r--  rendy-memory/src/util.rs | 157
-rw-r--r--  rendy-memory/src/utilization.rs | 137
-rw-r--r--  stockton-render/Cargo.toml | 9
-rw-r--r--  stockton-render/src/draw/buffer.rs | 84
-rw-r--r--  stockton-render/src/draw/context.rs | 166
-rw-r--r--  stockton-render/src/draw/depth_buffer.rs | 96
-rw-r--r--  stockton-render/src/draw/draw_buffers.rs | 4
-rw-r--r--  stockton-render/src/draw/macros.rs | 89
-rw-r--r--  stockton-render/src/draw/mod.rs | 3
-rw-r--r--  stockton-render/src/draw/pipeline.rs | 109
-rw-r--r--  stockton-render/src/draw/queue_negotiator.rs | 72
-rw-r--r--  stockton-render/src/draw/render.rs | 19
-rw-r--r--  stockton-render/src/draw/target.rs | 334
-rw-r--r--  stockton-render/src/draw/texture/block.rs | 11
-rw-r--r--  stockton-render/src/draw/texture/load.rs | 464
-rw-r--r--  stockton-render/src/draw/texture/loader.rs | 125
-rw-r--r--  stockton-render/src/draw/texture/repo.rs | 75
-rw-r--r--  stockton-render/src/draw/texture/staging_buffer.rs | 23
-rw-r--r--  stockton-render/src/draw/ui/pipeline.rs | 110
-rw-r--r--  stockton-render/src/draw/ui/render.rs | 18
-rwxr-xr-x  stockton-render/src/draw/ui/texture.rs | 6
-rw-r--r--  stockton-render/src/draw/utils.rs | 2
-rw-r--r--  stockton-render/src/error.rs | 17
-rw-r--r--  stockton-render/src/types.rs | 88
45 files changed, 4810 insertions, 896 deletions
diff --git a/Cargo.toml b/Cargo.toml
index da8eb6c..4403a53 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,8 @@ members = [
"stockton-render",
"stockton-levels",
"stockton-contrib",
+ "rendy-memory",
+ "rendy-descriptor",
"examples/render-bsp",
- "examples/input-codegen"
-] \ No newline at end of file
+ "examples/input-codegen",
+]
diff --git a/README.md b/README.md
index e55df58..d0580db 100644
--- a/README.md
+++ b/README.md
@@ -14,5 +14,6 @@ Code & Assets (including from `rust-bsp`) are licensed under the GNU GPL v3.0, a
Exceptions:
+ - `rendy-memory` and `rendy-descriptor` are both modified from [here](https://github.com/amethyst/rendy) and are licensed under MIT.
- `examples/render-quad/data/test1.png` - [Photo by Lisa Fotios from Pexels](https://www.pexels.com/photo/white-petaled-flowers-painting-2224220/)
- `examples/render-quad/data/test2.png` - [Photo by Elina Sazonova from Pexels](https://www.pexels.com/photo/brown-tabby-cat-on-pink-textile-3971972/) \ No newline at end of file
diff --git a/rendy-descriptor/Cargo.toml b/rendy-descriptor/Cargo.toml
new file mode 100644
index 0000000..8e4a4b9
--- /dev/null
+++ b/rendy-descriptor/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "rendy-descriptor"
+version = "0.5.1"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+edition = "2018"
+repository = "https://github.com/amethyst/rendy"
+license = "MIT OR Apache-2.0"
+documentation = "https://docs.rs/rendy-descriptor"
+keywords = ["graphics", "gfx-hal", "rendy"]
+categories = ["rendering"]
+description = "Rendy's descriptor allocator"
+
+[dependencies]
+gfx-hal = "^0.8"
+log = "0.4.11"
+relevant = { version = "0.4.2", features = ["log"] }
+smallvec = "1.5.1"
diff --git a/rendy-descriptor/src/allocator.rs b/rendy-descriptor/src/allocator.rs
new file mode 100644
index 0000000..36e059b
--- /dev/null
+++ b/rendy-descriptor/src/allocator.rs
@@ -0,0 +1,396 @@
+use {
+ crate::ranges::*,
+ gfx_hal::{
+ device::{Device, OutOfMemory},
+ pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags},
+ Backend,
+ },
+ smallvec::{smallvec, SmallVec},
+ std::{
+ collections::{HashMap, VecDeque},
+ ops::Deref,
+ },
+};
+
+const MIN_SETS: u32 = 64;
+const MAX_SETS: u32 = 512;
+
+/// Descriptor set from allocator.
+#[derive(Debug)]
+pub struct DescriptorSet<B: Backend> {
+ raw: B::DescriptorSet,
+ pool: u64,
+ ranges: DescriptorRanges,
+}
+
+impl<B> DescriptorSet<B>
+where
+ B: Backend,
+{
+ /// Get raw set
+ pub fn raw(&self) -> &B::DescriptorSet {
+ &self.raw
+ }
+
+    /// Get mutable reference to the raw set.
+    /// The set must not be replaced.
+ pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet {
+ &mut self.raw
+ }
+}
+
+impl<B> Deref for DescriptorSet<B>
+where
+ B: Backend,
+{
+ type Target = B::DescriptorSet;
+
+ fn deref(&self) -> &B::DescriptorSet {
+ &self.raw
+ }
+}
+
+#[derive(Debug)]
+struct Allocation<B: Backend> {
+ sets: SmallVec<[B::DescriptorSet; 1]>,
+ pools: Vec<u64>,
+}
+
+#[derive(Debug)]
+struct DescriptorPool<B: Backend> {
+ raw: B::DescriptorPool,
+ size: u32,
+
+ // Number of free sets left.
+ free: u32,
+
+ // Number of sets freed (they can't be reused until gfx-hal 0.2)
+ freed: u32,
+}
+
+unsafe fn allocate_from_pool<B: Backend>(
+ raw: &mut B::DescriptorPool,
+ layout: &B::DescriptorSetLayout,
+ count: u32,
+ allocation: &mut SmallVec<[B::DescriptorSet; 1]>,
+) -> Result<(), OutOfMemory> {
+ let sets_were = allocation.len();
+ raw.allocate(std::iter::repeat(layout).take(count as usize), allocation)
+ .map_err(|err| match err {
+ AllocationError::OutOfMemory(x) => x,
+ err => {
+            // We check the pool for free descriptors and sets before calling this function,
+            // so it can't be exhausted.
+            // And it can't be fragmented either, according to the spec:
+ //
+ // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo
+ //
+ // """
+ // Additionally, if all sets allocated from the pool since it was created or most recently reset
+ // use the same number of descriptors (of each type) and the requested allocation also
+ // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure
+ // """
+ unreachable!("Unexpected error: {:?}", err);
+ }
+ })?;
+ assert_eq!(allocation.len(), sets_were + count as usize);
+ Ok(())
+}
+
+#[derive(Debug)]
+struct DescriptorBucket<B: Backend> {
+ pools_offset: u64,
+ pools: VecDeque<DescriptorPool<B>>,
+ total: u64,
+}
+
+impl<B> DescriptorBucket<B>
+where
+ B: Backend,
+{
+ fn new() -> Self {
+ DescriptorBucket {
+ pools_offset: 0,
+ pools: VecDeque::new(),
+ total: 0,
+ }
+ }
+
+ fn new_pool_size(&self, count: u32) -> u32 {
+ MIN_SETS // at least MIN_SETS
+ .max(count) // at least enough for allocation
+ .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS
+ .next_power_of_two() // rounded up to nearest 2^N
+ }
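+
+    // Worked example of the policy above (added note, not in the original
+    // source): with MIN_SETS = 64 and MAX_SETS = 512, a bucket that has
+    // handed out 100 sets in total and receives a request for 3 more
+    // computes 64.max(3).max(100) = 100, which rounds up to a 128-set pool.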
+
+ unsafe fn dispose(mut self, device: &B::Device) {
+ if self.total > 0 {
+ log::error!("Not all descriptor sets were deallocated");
+ }
+
+ while let Some(pool) = self.pools.pop_front() {
+ assert!(pool.freed + pool.free <= pool.size);
+ if pool.freed + pool.free < pool.size {
+ log::error!(
+ "Descriptor pool is still in use during allocator disposal. {:?}",
+ pool
+ );
+ } else {
+ log::trace!("Destroying used up descriptor pool");
+ device.destroy_descriptor_pool(pool.raw);
+ self.pools_offset += 1;
+ }
+ }
+
+ self.pools
+ .drain(..)
+ .for_each(|pool| device.destroy_descriptor_pool(pool.raw));
+ }
+
+ unsafe fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_ranges: DescriptorRanges,
+ mut count: u32,
+ allocation: &mut Allocation<B>,
+ ) -> Result<(), OutOfMemory> {
+ if count == 0 {
+ return Ok(());
+ }
+
+ for (index, pool) in self.pools.iter_mut().enumerate().rev() {
+ if pool.free == 0 {
+ continue;
+ }
+
+ let allocate = pool.free.min(count);
+ log::trace!("Allocate {} from exising pool", allocate);
+ allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
+ allocation.pools.extend(
+ std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
+ );
+ count -= allocate;
+ pool.free -= allocate;
+ self.total += allocate as u64;
+
+ if count == 0 {
+ return Ok(());
+ }
+ }
+
+ while count > 0 {
+ let size = self.new_pool_size(count);
+ let pool_ranges = layout_ranges * size;
+ log::trace!(
+ "Create new pool with {} sets and {:?} descriptors",
+ size,
+ pool_ranges,
+ );
+ let raw = device.create_descriptor_pool(
+ size as usize,
+ pool_ranges.into_iter(),
+ DescriptorPoolCreateFlags::empty(),
+ )?;
+ let allocate = size.min(count);
+
+ self.pools.push_back(DescriptorPool {
+ raw,
+ size,
+ free: size,
+ freed: 0,
+ });
+ let index = self.pools.len() - 1;
+ let pool = self.pools.back_mut().unwrap();
+
+ allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
+ allocation.pools.extend(
+ std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
+ );
+
+ count -= allocate;
+ pool.free -= allocate;
+ self.total += allocate as u64;
+ }
+
+ Ok(())
+ }
+
+ unsafe fn free(&mut self, sets: impl IntoIterator<Item = B::DescriptorSet>, pool: u64) {
+ let pool = &mut self.pools[(pool - self.pools_offset) as usize];
+ let freed = sets.into_iter().count() as u32;
+ pool.freed += freed;
+ self.total -= freed as u64;
+ log::trace!("Freed {} from descriptor bucket", freed);
+ }
+
+ unsafe fn cleanup(&mut self, device: &B::Device) {
+ while let Some(pool) = self.pools.pop_front() {
+ if pool.freed < pool.size {
+ self.pools.push_front(pool);
+ break;
+ }
+ log::trace!("Destroying used up descriptor pool");
+ device.destroy_descriptor_pool(pool.raw);
+ self.pools_offset += 1;
+ }
+ }
+}
+
+/// Descriptor allocator.
+/// Can be used to allocate descriptor sets for any layout.
+#[derive(Debug)]
+pub struct DescriptorAllocator<B: Backend> {
+ buckets: HashMap<DescriptorRanges, DescriptorBucket<B>>,
+ allocation: Allocation<B>,
+ relevant: relevant::Relevant,
+ total: u64,
+}
+
+impl<B> DescriptorAllocator<B>
+where
+ B: Backend,
+{
+ /// Create new allocator instance.
+ pub fn new() -> Self {
+ DescriptorAllocator {
+ buckets: HashMap::new(),
+ allocation: Allocation {
+ sets: SmallVec::new(),
+ pools: Vec::new(),
+ },
+ relevant: relevant::Relevant,
+ total: 0,
+ }
+ }
+
+ /// Destroy allocator instance.
+ /// All sets allocated from this allocator become invalid.
+ pub unsafe fn dispose(mut self, device: &B::Device) {
+ self.buckets
+ .drain()
+ .for_each(|(_, bucket)| bucket.dispose(device));
+ self.relevant.dispose();
+ }
+
+ /// Allocate descriptor set with specified layout.
+ /// `DescriptorRanges` must match descriptor numbers of the layout.
+ /// `DescriptorRanges` can be constructed [from bindings] that were used
+ /// to create layout instance.
+ ///
+    /// [from bindings]: DescriptorRanges::from_bindings
+ pub unsafe fn allocate(
+ &mut self,
+ device: &B::Device,
+ layout: &B::DescriptorSetLayout,
+ layout_ranges: DescriptorRanges,
+ count: u32,
+ extend: &mut impl Extend<DescriptorSet<B>>,
+ ) -> Result<(), OutOfMemory> {
+ if count == 0 {
+ return Ok(());
+ }
+
+ log::trace!(
+ "Allocating {} sets with layout {:?} @ {:?}",
+ count,
+ layout,
+ layout_ranges
+ );
+
+ let bucket = self
+ .buckets
+ .entry(layout_ranges)
+ .or_insert_with(DescriptorBucket::new);
+ match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) {
+ Ok(()) => {
+ extend.extend(
+ Iterator::zip(
+ self.allocation.pools.drain(..),
+ self.allocation.sets.drain(..),
+ )
+ .map(|(pool, set)| DescriptorSet {
+ raw: set,
+ ranges: layout_ranges,
+ pool,
+ }),
+ );
+ Ok(())
+ }
+ Err(err) => {
+ // Free sets allocated so far.
+ let mut last = None;
+ for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() {
+ match last {
+ Some(last) if last == pool => {
+ // same pool, continue
+ }
+ Some(last) => {
+ // Free contiguous range of sets from one pool in one go.
+ bucket.free(self.allocation.sets.drain(index + 1..), last);
+ }
+ None => last = Some(pool),
+ }
+ }
+
+ if let Some(last) = last {
+ bucket.free(self.allocation.sets.drain(0..), last);
+ }
+
+ Err(err)
+ }
+ }
+ }
+
+ /// Free descriptor sets.
+ ///
+ /// # Safety
+ ///
+    /// None of the descriptor sets may be referenced by any pending command buffers.
+    /// All command buffers in which at least one of the descriptor sets is referenced
+    /// move to the invalid state.
+ pub unsafe fn free(&mut self, all_sets: impl IntoIterator<Item = DescriptorSet<B>>) {
+ let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None;
+
+        // Collect contiguous runs of sets that share the same ranges and pool, freeing each run in one go.
+ for set in all_sets {
+ match &mut free {
+ slot @ None => {
+ slot.replace((set.ranges, set.pool, smallvec![set.raw]));
+ }
+ Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => {
+ raw_sets.push(set.raw);
+ }
+ Some((ranges, pool, raw_sets)) => {
+ let bucket = self
+ .buckets
+ .get_mut(ranges)
+ .expect("Set should be allocated from this allocator");
+ debug_assert!(bucket.total >= raw_sets.len() as u64);
+
+ bucket.free(raw_sets.drain(..), *pool);
+ *pool = set.pool;
+ *ranges = set.ranges;
+ raw_sets.push(set.raw);
+ }
+ }
+ }
+
+ if let Some((ranges, pool, raw_sets)) = free {
+ let bucket = self
+ .buckets
+ .get_mut(&ranges)
+ .expect("Set should be allocated from this allocator");
+ debug_assert!(bucket.total >= raw_sets.len() as u64);
+
+ bucket.free(raw_sets, pool);
+ }
+ }
+
+ /// Perform cleanup to allow resources reuse.
+ pub unsafe fn cleanup(&mut self, device: &B::Device) {
+ self.buckets
+ .values_mut()
+ .for_each(|bucket| bucket.cleanup(device));
+ }
+}
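+
+// A minimal usage sketch (an illustrative assumption, not part of this file;
+// `device`, `layout` and `bindings` are placeholders and `B` is the gfx-hal
+// backend in use):
+//
+//     let mut allocator = DescriptorAllocator::<B>::new();
+//     let ranges = DescriptorRanges::from_bindings(&bindings);
+//     let mut sets = Vec::new();
+//     unsafe { allocator.allocate(&device, &layout, ranges, 4, &mut sets)? };
+//     // ... record and submit work that uses `sets`, then wait for it ...
+//     unsafe { allocator.free(sets) };
+//     unsafe { allocator.cleanup(&device) };
+//     unsafe { allocator.dispose(&device) };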
diff --git a/rendy-descriptor/src/lib.rs b/rendy-descriptor/src/lib.rs
new file mode 100644
index 0000000..18d5e0e
--- /dev/null
+++ b/rendy-descriptor/src/lib.rs
@@ -0,0 +1,4 @@
+mod allocator;
+mod ranges;
+
+pub use {allocator::*, ranges::*};
diff --git a/rendy-descriptor/src/ranges.rs b/rendy-descriptor/src/ranges.rs
new file mode 100644
index 0000000..d936ab1
--- /dev/null
+++ b/rendy-descriptor/src/ranges.rs
@@ -0,0 +1,278 @@
+use std::{
+ cmp::Ordering,
+ ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign},
+};
+
+pub use gfx_hal::pso::{
+ BufferDescriptorFormat, BufferDescriptorType, DescriptorRangeDesc, DescriptorSetLayoutBinding,
+ DescriptorType, ImageDescriptorType,
+};
+
+const DESCRIPTOR_TYPES_COUNT: usize = 11;
+
+const DESCRIPTOR_TYPES: [DescriptorType; DESCRIPTOR_TYPES_COUNT] = [
+ DescriptorType::Sampler,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled { with_sampler: true },
+ },
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Storage { read_only: false },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: false },
+ format: BufferDescriptorFormat::Texel,
+ },
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Texel,
+ },
+ DescriptorType::InputAttachment,
+];
+
+fn descriptor_type_index(ty: &DescriptorType) -> usize {
+ match ty {
+ DescriptorType::Sampler => 0,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled { with_sampler: true },
+ } => 1,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ } => 2,
+ DescriptorType::Image {
+ ty: ImageDescriptorType::Storage { read_only: _ },
+ } => 3,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ } => 4,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: true,
+ },
+ } => 5,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ } => 6,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format:
+ BufferDescriptorFormat::Structured {
+ dynamic_offset: false,
+ },
+ } => 7,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Storage { read_only: _ },
+ format: BufferDescriptorFormat::Texel,
+ } => 8,
+ DescriptorType::Buffer {
+ ty: BufferDescriptorType::Uniform,
+ format: BufferDescriptorFormat::Texel,
+ } => 9,
+ DescriptorType::InputAttachment => 10,
+ }
+}
+
+/// Number of descriptors per type.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct DescriptorRanges {
+ counts: [u32; DESCRIPTOR_TYPES_COUNT],
+}
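+
+// Example of the layout above (added note): a layout with one `Sampler`
+// binding and two sampled-image-with-sampler bindings (count 1 each) yields
+// counts = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0].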
+
+impl DescriptorRanges {
+ /// Create new instance without descriptors.
+ pub fn zero() -> Self {
+ DescriptorRanges {
+ counts: [0; DESCRIPTOR_TYPES_COUNT],
+ }
+ }
+
+ /// Add a single layout binding.
+ /// Useful when created with `DescriptorRanges::zero()`.
+ pub fn add_binding(&mut self, binding: DescriptorSetLayoutBinding) {
+ self.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+    /// Iterate through ranges, yielding
+    /// descriptor types and their counts.
+ pub fn iter(&self) -> DescriptorRangesIter<'_> {
+ DescriptorRangesIter {
+ counts: &self.counts,
+ index: 0,
+ }
+ }
+
+ /// Read as slice.
+ pub fn counts(&self) -> &[u32] {
+ &self.counts
+ }
+
+ /// Read or write as slice.
+ pub fn counts_mut(&mut self) -> &mut [u32] {
+ &mut self.counts
+ }
+
+ /// Calculate ranges from bindings.
+ pub fn from_bindings(bindings: &[DescriptorSetLayoutBinding]) -> Self {
+ let mut descs = Self::zero();
+
+ for binding in bindings {
+ descs.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+ descs
+ }
+
+ /// Calculate ranges from bindings, specified with an iterator.
+ pub fn from_binding_iter<I>(bindings: I) -> Self
+ where
+ I: Iterator<Item = DescriptorSetLayoutBinding>,
+ {
+ let mut descs = Self::zero();
+
+ for binding in bindings {
+ descs.counts[descriptor_type_index(&binding.ty)] += binding.count as u32;
+ }
+
+ descs
+ }
+}
+
+impl PartialOrd for DescriptorRanges {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ let mut ord = self.counts[0].partial_cmp(&other.counts[0])?;
+ for i in 1..DESCRIPTOR_TYPES_COUNT {
+ match (ord, self.counts[i].partial_cmp(&other.counts[i])?) {
+ (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => {
+ return None;
+ }
+ (Ordering::Equal, new) => ord = new,
+ _ => (),
+ }
+ }
+ Some(ord)
+ }
+}
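+
+// Examples of the partial order above (added note): counts that compare
+// componentwise, e.g. [2, 2, 0, ...] vs [1, 2, 0, ...], yield
+// `Some(Ordering::Greater)`; counts whose components disagree,
+// e.g. [2, 1, ...] vs [1, 2, ...], yield `None`.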
+
+impl Add for DescriptorRanges {
+ type Output = Self;
+ fn add(mut self, rhs: Self) -> Self {
+ self += rhs;
+ self
+ }
+}
+
+impl AddAssign for DescriptorRanges {
+ fn add_assign(&mut self, rhs: Self) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] += rhs.counts[i];
+ }
+ }
+}
+
+impl Sub for DescriptorRanges {
+ type Output = Self;
+ fn sub(mut self, rhs: Self) -> Self {
+ self -= rhs;
+ self
+ }
+}
+
+impl SubAssign for DescriptorRanges {
+ fn sub_assign(&mut self, rhs: Self) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] -= rhs.counts[i];
+ }
+ }
+}
+
+impl Mul<u32> for DescriptorRanges {
+ type Output = Self;
+ fn mul(mut self, rhs: u32) -> Self {
+ self *= rhs;
+ self
+ }
+}
+
+impl MulAssign<u32> for DescriptorRanges {
+ fn mul_assign(&mut self, rhs: u32) {
+ for i in 0..DESCRIPTOR_TYPES_COUNT {
+ self.counts[i] *= rhs;
+ }
+ }
+}
+
+impl<'a> IntoIterator for &'a DescriptorRanges {
+ type Item = DescriptorRangeDesc;
+ type IntoIter = DescriptorRangesIter<'a>;
+
+ fn into_iter(self) -> DescriptorRangesIter<'a> {
+ self.iter()
+ }
+}
+
+/// Iterator over descriptor ranges.
+pub struct DescriptorRangesIter<'a> {
+ counts: &'a [u32; DESCRIPTOR_TYPES_COUNT],
+ index: u8,
+}
+
+impl<'a> Iterator for DescriptorRangesIter<'a> {
+ type Item = DescriptorRangeDesc;
+
+ fn next(&mut self) -> Option<DescriptorRangeDesc> {
+ loop {
+ let index = self.index as usize;
+ if index >= DESCRIPTOR_TYPES_COUNT {
+ return None;
+ } else {
+ self.index += 1;
+ if self.counts[index] > 0 {
+ return Some(DescriptorRangeDesc {
+ count: self.counts[index] as usize,
+ ty: DESCRIPTOR_TYPES[index],
+ });
+ }
+ }
+ }
+ }
+}
diff --git a/rendy-memory/Cargo.toml b/rendy-memory/Cargo.toml
new file mode 100644
index 0000000..8a0d29f
--- /dev/null
+++ b/rendy-memory/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rendy-memory"
+version = "0.5.3"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+edition = "2018"
+repository = "https://github.com/amethyst/rendy"
+license = "MIT OR Apache-2.0"
+documentation = "https://docs.rs/rendy-memory"
+keywords = ["graphics", "gfx-hal", "rendy"]
+categories = ["rendering"]
+description = "Rendy's memory manager"
+
+[features]
+serde-1 = ["serde", "gfx-hal/serde"]
+
+[dependencies]
+gfx-hal = "^0.8"
+log = "0.4.11"
+hibitset = { version = "0.6.3", default-features = false }
+relevant = { version = "0.4.2", features = ["log"] }
+serde = { version = "1.0.118", optional = true, features = ["derive"] }
+smallvec = "1.5.1"
+slab = "0.4.2"
+colorful = "0.2.1"
+
+[dev-dependencies]
+rand = "0.8.0"
diff --git a/rendy-memory/src/allocator/dedicated.rs b/rendy-memory/src/allocator/dedicated.rs
new file mode 100644
index 0000000..7806509
--- /dev/null
+++ b/rendy-memory/src/allocator/dedicated.rs
@@ -0,0 +1,218 @@
+use std::{ops::Range, ptr::NonNull};
+
+use {
+ crate::{
+ allocator::{Allocator, Kind},
+ block::Block,
+ mapping::{mapped_sub_range, MappedRange},
+ memory::*,
+ util::*,
+ },
+ gfx_hal::{device::Device as _, Backend},
+};
+
+/// Memory block allocated from `DedicatedAllocator`
+#[derive(Debug)]
+pub struct DedicatedBlock<B: Backend> {
+ memory: Memory<B>,
+ mapping: Option<(NonNull<u8>, Range<u64>)>,
+}
+
+unsafe impl<B> Send for DedicatedBlock<B> where B: Backend {}
+unsafe impl<B> Sync for DedicatedBlock<B> where B: Backend {}
+
+impl<B> DedicatedBlock<B>
+where
+ B: Backend,
+{
+ /// Get inner memory.
+ /// Panics if mapped.
+ pub fn unwrap_memory(self) -> Memory<B> {
+ assert!(self.mapping.is_none());
+ self.memory
+ }
+
+ /// Make unmapped block.
+ pub fn from_memory(memory: Memory<B>) -> Self {
+ DedicatedBlock {
+ memory,
+ mapping: None,
+ }
+ }
+}
+
+impl<B> Block<B> for DedicatedBlock<B>
+where
+ B: Backend,
+{
+ #[inline]
+ fn properties(&self) -> gfx_hal::memory::Properties {
+ self.memory.properties()
+ }
+
+ #[inline]
+ fn memory(&self) -> &B::Memory {
+ self.memory.raw()
+ }
+
+ #[inline]
+ fn range(&self) -> Range<u64> {
+ 0..self.memory.size()
+ }
+
+ fn map<'a>(
+ &'a mut self,
+ device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError> {
+ assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+
+ if !self.memory.host_visible() {
+ //TODO: invalid access error
+ return Err(gfx_hal::device::MapError::MappingFailed);
+ }
+
+ let requested_range = range.clone();
+ let mapping_range = if !self.memory.host_coherent() {
+ align_range(range, self.memory.non_coherent_atom_size())
+ } else {
+ range
+ };
+
+ unsafe {
+ if let Some(ptr) = self
+ .mapping
+ .clone()
+ .and_then(|(ptr, range)| mapped_sub_range(ptr, range, mapping_range.clone()))
+ {
+ Ok(MappedRange::from_raw(
+ &self.memory,
+ ptr,
+ mapping_range,
+ requested_range,
+ ))
+ } else {
+ self.unmap(device);
+ let ptr = device.map_memory(
+ self.memory.raw_mut(),
+ gfx_hal::memory::Segment {
+ offset: mapping_range.start,
+ size: Some(mapping_range.end - mapping_range.start),
+ },
+ )?;
+ let ptr = NonNull::new(ptr).expect("Memory mapping shouldn't return nullptr");
+ let mapping =
+ MappedRange::from_raw(&self.memory, ptr, mapping_range, requested_range);
+ self.mapping = Some((mapping.ptr(), mapping.range()));
+ Ok(mapping)
+ }
+ }
+ }
+
+ fn unmap(&mut self, device: &B::Device) {
+ if self.mapping.take().is_some() {
+ unsafe {
+ // trace!("Unmap memory: {:#?}", self.memory);
+ device.unmap_memory(self.memory.raw_mut());
+ }
+ }
+ }
+}
+
+/// Dedicated memory allocator that uses one memory object per requested allocation.
+///
+/// This allocator is best suited to huge allocations:
+/// roughly 32 MiB and up when the GPU has 4-8 GiB of memory in total.
+///
+/// `Heaps` uses this allocator when none of the sub-allocators bound to the memory type
+/// can handle the requested size.
+/// TODO: Check if resource prefers dedicated memory.
+#[derive(Debug)]
+pub struct DedicatedAllocator {
+ memory_type: gfx_hal::MemoryTypeId,
+ memory_properties: gfx_hal::memory::Properties,
+ non_coherent_atom_size: u64,
+ used: u64,
+}
+
+impl DedicatedAllocator {
+ /// Get properties required by the allocator.
+ pub fn properties_required() -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::empty()
+ }
+
+    /// Create new `DedicatedAllocator`
+    /// for `memory_type` with `memory_properties` specified
+ pub fn new(
+ memory_type: gfx_hal::MemoryTypeId,
+ memory_properties: gfx_hal::memory::Properties,
+ non_coherent_atom_size: u64,
+ ) -> Self {
+ DedicatedAllocator {
+ memory_type,
+ memory_properties,
+ non_coherent_atom_size,
+ used: 0,
+ }
+ }
+}
+
+impl<B> Allocator<B> for DedicatedAllocator
+where
+ B: Backend,
+{
+ type Block = DedicatedBlock<B>;
+
+ fn kind() -> Kind {
+ Kind::Dedicated
+ }
+
+ #[inline]
+ fn alloc(
+ &mut self,
+ device: &B::Device,
+ size: u64,
+ _align: u64,
+ ) -> Result<(DedicatedBlock<B>, u64), gfx_hal::device::AllocationError> {
+ let size = if is_non_coherent_visible(self.memory_properties) {
+ align_size(size, self.non_coherent_atom_size)
+ } else {
+ size
+ };
+
+ let memory = unsafe {
+ Memory::from_raw(
+ device.allocate_memory(self.memory_type, size)?,
+ size,
+ self.memory_properties,
+ self.non_coherent_atom_size,
+ )
+ };
+
+ self.used += size;
+
+ Ok((DedicatedBlock::from_memory(memory), size))
+ }
+
+ #[inline]
+ fn free(&mut self, device: &B::Device, mut block: DedicatedBlock<B>) -> u64 {
+ block.unmap(device);
+ let size = block.memory.size();
+ self.used -= size;
+ unsafe {
+ device.free_memory(block.memory.into_raw());
+ }
+ size
+ }
+}
+
+impl Drop for DedicatedAllocator {
+ fn drop(&mut self) {
+ if self.used > 0 {
+ log::error!("Not all allocation from DedicatedAllocator was freed");
+ }
+ }
+}
diff --git a/rendy-memory/src/allocator/dynamic.rs b/rendy-memory/src/allocator/dynamic.rs
new file mode 100644
index 0000000..a41cc27
--- /dev/null
+++ b/rendy-memory/src/allocator/dynamic.rs
@@ -0,0 +1,716 @@
+use std::{
+ collections::{BTreeSet, HashMap},
+ ops::Range,
+ ptr::NonNull,
+ thread,
+};
+
+use {
+ crate::{
+ allocator::{Allocator, Kind},
+ block::Block,
+ mapping::*,
+ memory::*,
+ util::*,
+ },
+ gfx_hal::{device::Device as _, Backend},
+ hibitset::{BitSet, BitSetLike as _},
+};
+
+/// Memory block allocated from `DynamicAllocator`
+#[derive(Debug)]
+pub struct DynamicBlock<B: Backend> {
+ block_index: u32,
+ chunk_index: u32,
+ count: u32,
+ memory: *const Memory<B>,
+ ptr: Option<NonNull<u8>>,
+ range: Range<u64>,
+ relevant: relevant::Relevant,
+}
+
+unsafe impl<B> Send for DynamicBlock<B> where B: Backend {}
+unsafe impl<B> Sync for DynamicBlock<B> where B: Backend {}
+
+impl<B> DynamicBlock<B>
+where
+ B: Backend,
+{
+ fn shared_memory(&self) -> &Memory<B> {
+        // Memory won't be freed until the last block created from it is deallocated.
+ unsafe { &*self.memory }
+ }
+
+ fn size(&self) -> u64 {
+ self.range.end - self.range.start
+ }
+
+ fn dispose(self) {
+ self.relevant.dispose();
+ }
+}
+
+impl<B> Block<B> for DynamicBlock<B>
+where
+ B: Backend,
+{
+ #[inline]
+ fn properties(&self) -> gfx_hal::memory::Properties {
+ self.shared_memory().properties()
+ }
+
+ #[inline]
+ fn memory(&self) -> &B::Memory {
+ self.shared_memory().raw()
+ }
+
+ #[inline]
+ fn range(&self) -> Range<u64> {
+ self.range.clone()
+ }
+
+ #[inline]
+ fn map<'a>(
+ &'a mut self,
+ _device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError> {
+ debug_assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+
+ if !self.shared_memory().host_visible() {
+ //TODO: invalid access error
+ return Err(gfx_hal::device::MapError::MappingFailed);
+ }
+
+ let requested_range = relative_to_sub_range(self.range.clone(), range)
+ .ok_or(gfx_hal::device::MapError::OutOfBounds)?;
+
+ let mapping_range = if !self.shared_memory().host_coherent() {
+ align_range(
+ requested_range.clone(),
+ self.shared_memory().non_coherent_atom_size(),
+ )
+ } else {
+ requested_range.clone()
+ };
+
+ if let Some(ptr) = self.ptr {
+ let ptr = mapped_sub_range(ptr, self.range.clone(), mapping_range.clone()).unwrap();
+ let mapping = unsafe {
+ MappedRange::from_raw(self.shared_memory(), ptr, mapping_range, requested_range)
+ };
+ Ok(mapping)
+ } else {
+ Err(gfx_hal::device::MapError::MappingFailed)
+ }
+ }
+
+ #[inline]
+ fn unmap(&mut self, _device: &B::Device) {}
+}
+
+/// Config for `DynamicAllocator`.
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct DynamicConfig {
+ /// All requests are rounded up to multiple of this value.
+ pub block_size_granularity: u64,
+
+    /// Maximum size of a chunk of blocks.
+    /// The actual chunk size is `min(max_chunk_size, block_size * blocks_per_chunk)`.
+ pub max_chunk_size: u64,
+
+ /// Minimum size of device allocation.
+ pub min_device_allocation: u64,
+}
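+
+// A plausible configuration (an illustrative assumption, not a recommended
+// default); all three values are powers of two, as the asserts in
+// `DynamicAllocator::new` require:
+//
+//     let config = DynamicConfig {
+//         block_size_granularity: 256,
+//         max_chunk_size: 32 * 1024 * 1024,
+//         min_device_allocation: 1024 * 1024,
+//     };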
+
+/// No-fragmentation allocator.
+/// Suitable for all kinds of small allocations.
+/// Every freed block can be reused.
+#[derive(Debug)]
+pub struct DynamicAllocator<B: Backend> {
+ /// Memory type that this allocator allocates.
+ memory_type: gfx_hal::MemoryTypeId,
+
+ /// Memory properties of the memory type.
+ memory_properties: gfx_hal::memory::Properties,
+
+ /// All requests are rounded up to multiple of this value.
+ block_size_granularity: u64,
+
+    /// Maximum size of a chunk of blocks.
+ max_chunk_size: u64,
+
+ /// Minimum size of device allocation.
+ min_device_allocation: u64,
+
+ /// Chunk lists.
+ sizes: HashMap<u64, SizeEntry<B>>,
+
+ /// Ordered set of sizes that have allocated chunks.
+ chunks: BTreeSet<u64>,
+ non_coherent_atom_size: u64,
+}
+
+unsafe impl<B> Send for DynamicAllocator<B> where B: Backend {}
+unsafe impl<B> Sync for DynamicAllocator<B> where B: Backend {}
+
+#[derive(Debug)]
+struct SizeEntry<B: Backend> {
+ /// Total count of allocated blocks with size corresponding to this entry.
+ total_blocks: u64,
+
+    /// Bit set of ready (non-exhausted) chunks that still have free blocks.
+ ready_chunks: BitSet,
+
+ /// List of chunks.
+ chunks: slab::Slab<Chunk<B>>,
+}
+
+impl<B> Default for SizeEntry<B>
+where
+ B: Backend,
+{
+ fn default() -> Self {
+ SizeEntry {
+ chunks: Default::default(),
+ total_blocks: 0,
+ ready_chunks: Default::default(),
+ }
+ }
+}
+
+const MAX_BLOCKS_PER_CHUNK: u32 = 64;
+const MIN_BLOCKS_PER_CHUNK: u32 = 8;
+
+impl<B> DynamicAllocator<B>
+where
+ B: Backend,
+{
+ /// Create new `DynamicAllocator`
+ /// for `memory_type` with `memory_properties` specified,
+ /// with `DynamicConfig` provided.
+ pub fn new(
+ memory_type: gfx_hal::MemoryTypeId,
+ memory_properties: gfx_hal::memory::Properties,
+ config: DynamicConfig,
+ non_coherent_atom_size: u64,
+ ) -> Self {
+ log::trace!(
+ "Create new allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'",
+ memory_type,
+ memory_properties,
+ config
+ );
+
+ assert!(
+ config.block_size_granularity.is_power_of_two(),
+ "Allocation granularity must be power of two"
+ );
+
+ let block_size_granularity = if is_non_coherent_visible(memory_properties) {
+ non_coherent_atom_size
+ .max(config.block_size_granularity)
+ .next_power_of_two()
+ } else {
+ config.block_size_granularity
+ };
+
+ assert!(
+ config.max_chunk_size.is_power_of_two(),
+ "Max chunk size must be power of two"
+ );
+
+ assert!(
+ config.min_device_allocation.is_power_of_two(),
+ "Min device allocation must be power of two"
+ );
+
+ assert!(
+ config.min_device_allocation <= config.max_chunk_size,
+ "Min device allocation must be less than or equalt to max chunk size"
+ );
+
+ if memory_properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE) {
+ debug_assert!(
+ fits_usize(config.max_chunk_size),
+ "Max chunk size must fit usize for mapping"
+ );
+ }
+
+ DynamicAllocator {
+ memory_type,
+ memory_properties,
+ block_size_granularity,
+ max_chunk_size: config.max_chunk_size,
+ min_device_allocation: config.min_device_allocation,
+ sizes: HashMap::new(),
+ chunks: BTreeSet::new(),
+ non_coherent_atom_size,
+ }
+ }
+
+ /// Maximum allocation size.
+ pub fn max_allocation(&self) -> u64 {
+ self.max_chunk_size / MIN_BLOCKS_PER_CHUNK as u64
+ }
+
+ /// Allocate memory chunk from device.
+ fn alloc_chunk_from_device(
+ &self,
+ device: &B::Device,
+ block_size: u64,
+ chunk_size: u64,
+ ) -> Result<Chunk<B>, gfx_hal::device::AllocationError> {
+ log::trace!(
+ "Allocate chunk of size: {} for blocks of size {} from device",
+ chunk_size,
+ block_size
+ );
+
+ // Allocate from device.
+ let (memory, mapping) = unsafe {
+ // Valid memory type specified.
+ let mut raw = device.allocate_memory(self.memory_type, chunk_size)?;
+
+ let mapping = if self
+ .memory_properties
+ .contains(gfx_hal::memory::Properties::CPU_VISIBLE)
+ {
+ log::trace!("Map new memory object");
+ match device.map_memory(
+ &mut raw,
+ gfx_hal::memory::Segment {
+ offset: 0,
+ size: Some(chunk_size),
+ },
+ ) {
+ Ok(mapping) => Some(NonNull::new_unchecked(mapping)),
+ Err(gfx_hal::device::MapError::OutOfMemory(error)) => {
+ device.free_memory(raw);
+ return Err(error.into());
+ }
+ Err(_) => panic!("Unexpected mapping failure"),
+ }
+ } else {
+ None
+ };
+ let memory = Memory::from_raw(
+ raw,
+ chunk_size,
+ self.memory_properties,
+ self.non_coherent_atom_size,
+ );
+ (memory, mapping)
+ };
+ Ok(Chunk::from_memory(block_size, memory, mapping))
+ }
+
+ /// Allocate memory chunk for given block size.
+ fn alloc_chunk(
+ &mut self,
+ device: &B::Device,
+ block_size: u64,
+ total_blocks: u64,
+ ) -> Result<(Chunk<B>, u64), gfx_hal::device::AllocationError> {
+ log::trace!(
+ "Allocate chunk for blocks of size {} ({} total blocks allocated)",
+ block_size,
+ total_blocks
+ );
+
+ let min_chunk_size = MIN_BLOCKS_PER_CHUNK as u64 * block_size;
+ let min_size = min_chunk_size.min(total_blocks * block_size);
+ let max_chunk_size = MAX_BLOCKS_PER_CHUNK as u64 * block_size;
+
+        // If the smallest possible chunk size is larger than this allocator's max allocation
+ if min_size > self.max_allocation()
+ || (total_blocks < MIN_BLOCKS_PER_CHUNK as u64
+ && min_size >= self.min_device_allocation)
+ {
+ // Allocate memory block from device.
+ let chunk = self.alloc_chunk_from_device(device, block_size, min_size)?;
+ return Ok((chunk, min_size));
+ }
+
+ if let Some(&chunk_size) = self
+ .chunks
+ .range(min_chunk_size..=max_chunk_size)
+ .next_back()
+ {
+ // Allocate block for the chunk.
+ let (block, allocated) = self.alloc_from_entry(device, chunk_size, 1, block_size)?;
+ Ok((Chunk::from_block(block_size, block), allocated))
+ } else {
+ let total_blocks = self.sizes[&block_size].total_blocks;
+ let chunk_size =
+ (max_chunk_size.min(min_chunk_size.max(total_blocks * block_size)) / 2 + 1)
+ .next_power_of_two();
+ let (block, allocated) = self.alloc_block(device, chunk_size, block_size)?;
+ Ok((Chunk::from_block(block_size, block), allocated))
+ }
+ }
+
+ /// Allocate blocks from particular chunk.
+ fn alloc_from_chunk(
+ chunks: &mut slab::Slab<Chunk<B>>,
+ chunk_index: u32,
+ block_size: u64,
+ count: u32,
+ align: u64,
+ ) -> Option<DynamicBlock<B>> {
+ log::trace!(
+ "Allocate {} consecutive blocks of size {} from chunk {}",
+ count,
+ block_size,
+ chunk_index
+ );
+
+ let chunk = &mut chunks[chunk_index as usize];
+ let block_index = chunk.acquire_blocks(count, block_size, align)?;
+ let block_range = chunk.blocks_range(block_size, block_index, count);
+
+ debug_assert_eq!((block_range.end - block_range.start) % count as u64, 0);
+
+ Some(DynamicBlock {
+ range: block_range.clone(),
+ memory: chunk.shared_memory(),
+ block_index,
+ chunk_index,
+ count,
+ ptr: chunk.mapping_ptr().map(|ptr| {
+ mapped_sub_range(ptr, chunk.range(), block_range)
+ .expect("Block must be sub-range of chunk")
+ }),
+ relevant: relevant::Relevant,
+ })
+ }
+
+ /// Allocate blocks from size entry.
+ fn alloc_from_entry(
+ &mut self,
+ device: &B::Device,
+ block_size: u64,
+ count: u32,
+ align: u64,
+ ) -> Result<(DynamicBlock<B>, u64), gfx_hal::device::AllocationError> {
+ log::trace!(
+ "Allocate {} consecutive blocks for size {} from the entry",
+ count,
+ block_size
+ );
+
+ debug_assert!(count < MIN_BLOCKS_PER_CHUNK);
+ let size_entry = self.sizes.entry(block_size).or_default();
+
+ for chunk_index in (&size_entry.ready_chunks).iter() {
+ if let Some(block) = Self::alloc_from_chunk(
+ &mut size_entry.chunks,
+ chunk_index,
+ block_size,
+ count,
+ align,
+ ) {
+ return Ok((block, 0));
+ }
+ }
+
+ if size_entry.chunks.vacant_entry().key() > max_chunks_per_size() {
+ return Err(gfx_hal::device::OutOfMemory::Host.into());
+ }
+
+ let total_blocks = size_entry.total_blocks;
+ let (chunk, allocated) = self.alloc_chunk(device, block_size, total_blocks)?;
+ let size_entry = self.sizes.entry(block_size).or_default();
+ let chunk_index = size_entry.chunks.insert(chunk) as u32;
+
+ let block = Self::alloc_from_chunk(
+ &mut size_entry.chunks,
+ chunk_index,
+ block_size,
+ count,
+ align,
+ )
+ .expect("New chunk should yield blocks");
+
+ if !size_entry.chunks[chunk_index as usize].is_exhausted() {
+ size_entry.ready_chunks.add(chunk_index);
+ }
+
+ Ok((block, allocated))
+ }
+
+ /// Allocate block.
+ fn alloc_block(
+ &mut self,
+ device: &B::Device,
+ block_size: u64,
+ align: u64,
+ ) -> Result<(DynamicBlock<B>, u64), gfx_hal::device::AllocationError> {
+ log::trace!("Allocate block of size {}", block_size);
+
+ debug_assert_eq!(block_size % self.block_size_granularity, 0);
+ let size_entry = self.sizes.entry(block_size).or_default();
+ size_entry.total_blocks += 1;
+
+ let overhead = (MIN_BLOCKS_PER_CHUNK as u64 - 1) / size_entry.total_blocks;
+
+ if overhead >= 1 {
+ if let Some(&size) = self
+ .chunks
+ .range(block_size / 4..block_size * overhead)
+ .next()
+ {
+ return self.alloc_from_entry(
+ device,
+ size,
+ ((block_size - 1) / size + 1) as u32,
+ align,
+ );
+ }
+ }
+
+ if size_entry.total_blocks == MIN_BLOCKS_PER_CHUNK as u64 {
+ self.chunks.insert(block_size);
+ }
+
+ self.alloc_from_entry(device, block_size, 1, align)
+ }
+
+ fn free_chunk(&mut self, device: &B::Device, chunk: Chunk<B>, block_size: u64) -> u64 {
+ log::trace!("Free chunk: {:#?}", chunk);
+ assert!(chunk.is_unused(block_size));
+ match chunk.flavor {
+ ChunkFlavor::Dedicated(mut boxed, _) => {
+ let size = boxed.size();
+ unsafe {
+ if self
+ .memory_properties
+ .contains(gfx_hal::memory::Properties::CPU_VISIBLE)
+ {
+ log::trace!("Unmap memory: {:#?}", boxed);
+ device.unmap_memory(boxed.raw_mut());
+ }
+ device.free_memory(boxed.into_raw());
+ }
+ size
+ }
+ ChunkFlavor::Dynamic(dynamic_block) => self.free(device, dynamic_block),
+ }
+ }
+
+ fn free_block(&mut self, device: &B::Device, block: DynamicBlock<B>) -> u64 {
+ log::trace!("Free block: {:#?}", block);
+
+ let block_size = block.size() / block.count as u64;
+ let size_entry = &mut self
+ .sizes
+ .get_mut(&block_size)
+ .expect("Unable to get size entry from which block was allocated");
+ let chunk_index = block.chunk_index;
+ let chunk = &mut size_entry.chunks[chunk_index as usize];
+ let block_index = block.block_index;
+ let count = block.count;
+ block.dispose();
+ chunk.release_blocks(block_index, count);
+ if chunk.is_unused(block_size) {
+ size_entry.ready_chunks.remove(chunk_index);
+ let chunk = size_entry.chunks.remove(chunk_index as usize);
+ self.free_chunk(device, chunk, block_size)
+ } else {
+ size_entry.ready_chunks.add(chunk_index);
+ 0
+ }
+ }
+
+ /// Perform full cleanup of the memory allocated.
+ pub fn dispose(self) {
+ if !thread::panicking() {
+ for (index, size) in self.sizes {
+ assert_eq!(size.chunks.len(), 0, "SizeEntry({}) is still used", index);
+ }
+ } else {
+ for (index, size) in self.sizes {
+ if !size.chunks.is_empty() {
+ log::error!("Memory leak: SizeEntry({}) is still used", index);
+ }
+ }
+ }
+ }
+}
+
+impl<B> Allocator<B> for DynamicAllocator<B>
+where
+ B: Backend,
+{
+ type Block = DynamicBlock<B>;
+
+ fn kind() -> Kind {
+ Kind::Dynamic
+ }
+
+ fn alloc(
+ &mut self,
+ device: &B::Device,
+ size: u64,
+ align: u64,
+ ) -> Result<(DynamicBlock<B>, u64), gfx_hal::device::AllocationError> {
+ debug_assert!(size <= self.max_allocation());
+ debug_assert!(align.is_power_of_two());
+ let aligned_size = ((size - 1) | (align - 1) | (self.block_size_granularity - 1)) + 1;
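+        // Worked example of the rounding above (added note): size = 100,
+        // align = 16 and granularity = 256 give (99 | 15 | 255) + 1 = 256;
+        // since `align` and the granularity are powers of two, this rounds
+        // `size` up to the next multiple of the largest of them.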
+
+ // This will change nothing if `self.non_coherent_atom_size` is power of two.
+ // But just in case...
+ let aligned_size = if is_non_coherent_visible(self.memory_properties) {
+ align_size(aligned_size, self.non_coherent_atom_size)
+ } else {
+ aligned_size
+ };
+
+ log::trace!(
+ "Allocate dynamic block: size: {}, align: {}, aligned size: {}, type: {}",
+ size,
+ align,
+ aligned_size,
+ self.memory_type.0
+ );
+
+ self.alloc_block(device, aligned_size, align)
+ }
+
+ fn free(&mut self, device: &B::Device, block: DynamicBlock<B>) -> u64 {
+ self.free_block(device, block)
+ }
+}
+
+/// Block allocated for chunk.
+#[derive(Debug)]
+enum ChunkFlavor<B: Backend> {
+ /// Allocated from device.
+ Dedicated(Box<Memory<B>>, Option<NonNull<u8>>),
+
+ /// Allocated from chunk of bigger blocks.
+ Dynamic(DynamicBlock<B>),
+}
+
+#[derive(Debug)]
+struct Chunk<B: Backend> {
+ flavor: ChunkFlavor<B>,
+ blocks: u64,
+}
+
+impl<B> Chunk<B>
+where
+ B: Backend,
+{
+ fn from_memory(block_size: u64, memory: Memory<B>, mapping: Option<NonNull<u8>>) -> Self {
+ let blocks = memory.size() / block_size;
+ debug_assert!(blocks <= MAX_BLOCKS_PER_CHUNK as u64);
+
+ let high_bit = 1 << (blocks - 1);
+
+ Chunk {
+ flavor: ChunkFlavor::Dedicated(Box::new(memory), mapping),
+ blocks: (high_bit - 1) | high_bit,
+ }
+ }
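+
+    // Example of the mask above (added note): memory.size() / block_size = 8
+    // blocks gives high_bit = 1 << 7, so blocks = 0b1111_1111, i.e. all
+    // eight blocks start out free.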
+
+ fn from_block(block_size: u64, chunk_block: DynamicBlock<B>) -> Self {
+ let blocks = (chunk_block.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64);
+
+ let high_bit = 1 << (blocks - 1);
+
+ Chunk {
+ flavor: ChunkFlavor::Dynamic(chunk_block),
+ blocks: (high_bit - 1) | high_bit,
+ }
+ }
+
+ fn shared_memory(&self) -> &Memory<B> {
+ match &self.flavor {
+ ChunkFlavor::Dedicated(boxed, _) => &*boxed,
+ ChunkFlavor::Dynamic(chunk_block) => chunk_block.shared_memory(),
+ }
+ }
+
+ fn range(&self) -> Range<u64> {
+ match &self.flavor {
+ ChunkFlavor::Dedicated(boxed, _) => 0..boxed.size(),
+ ChunkFlavor::Dynamic(chunk_block) => chunk_block.range(),
+ }
+ }
+
+ fn size(&self) -> u64 {
+ let range = self.range();
+ range.end - range.start
+ }
+
+    // Get the byte range covered by `count` blocks starting at `block_index`.
+ fn blocks_range(&self, block_size: u64, block_index: u32, count: u32) -> Range<u64> {
+ let range = self.range();
+ let start = range.start + block_size * block_index as u64;
+ let end = start + block_size * count as u64;
+ debug_assert!(end <= range.end);
+ start..end
+ }
+
+    /// Check if all blocks in this chunk are free.
+ fn is_unused(&self, block_size: u64) -> bool {
+ let blocks = (self.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64);
+
+ let high_bit = 1 << (blocks - 1);
+ let mask = (high_bit - 1) | high_bit;
+
+ debug_assert!(self.blocks <= mask);
+ self.blocks == mask
+ }
+
+    /// Check if there are no free blocks left.
+ fn is_exhausted(&self) -> bool {
+ self.blocks == 0
+ }
+
+ fn acquire_blocks(&mut self, count: u32, block_size: u64, align: u64) -> Option<u32> {
+ debug_assert!(count > 0 && count <= MAX_BLOCKS_PER_CHUNK);
+
+ // Holds a bit-array of all positions with `count` free blocks.
+ let mut blocks = !0;
+ for i in 0..count {
+ blocks &= self.blocks >> i;
+ }
+ // Find a position in `blocks` that is aligned.
+ while blocks != 0 {
+ let index = blocks.trailing_zeros();
+ blocks &= !(1 << index);
+
+ if (index as u64 * block_size) & (align - 1) == 0 {
+ let mask = ((1 << count) - 1) << index;
+ self.blocks &= !mask;
+ return Some(index);
+ }
+ }
+ None
+ }
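+
+    // Worked example of the scan above (added note): with
+    // self.blocks = 0b0111_0110 and count = 2, the AND of the shifted copies
+    // is 0b0011_0010, so candidate start indices are 1, 4 and 5; the first
+    // candidate whose byte offset satisfies `align` is taken and its `count`
+    // bits are cleared from the mask.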
+
+ fn release_blocks(&mut self, index: u32, count: u32) {
+ let mask = ((1 << count) - 1) << index;
+ debug_assert_eq!(self.blocks & mask, 0);
+ self.blocks |= mask;
+ }
+
+ fn mapping_ptr(&self) -> Option<NonNull<u8>> {
+ match &self.flavor {
+ ChunkFlavor::Dedicated(_, ptr) => *ptr,
+ ChunkFlavor::Dynamic(chunk_block) => chunk_block.ptr,
+ }
+ }
+}
+
+fn max_chunks_per_size() -> usize {
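+    // Cap on the number of chunks per size entry: (bits in usize)^4,
+    // i.e. 64^4 on 64-bit targets (added note).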
+ let value = (std::mem::size_of::<usize>() * 8).pow(4);
+ debug_assert!(fits_u32(value));
+ value
+}
diff --git a/rendy-memory/src/allocator/linear.rs b/rendy-memory/src/allocator/linear.rs
new file mode 100644
index 0000000..6687d4a
--- /dev/null
+++ b/rendy-memory/src/allocator/linear.rs
@@ -0,0 +1,363 @@
+use std::{collections::VecDeque, ops::Range, ptr::NonNull};
+
+use {
+ crate::{
+ allocator::{Allocator, Kind},
+ block::Block,
+ mapping::*,
+ memory::*,
+ util::*,
+ },
+ gfx_hal::{device::Device as _, Backend},
+ std::sync::Arc,
+};
+
+/// Memory block allocated from `LinearAllocator`
+pub struct LinearBlock<B: Backend> {
+ memory: Arc<Memory<B>>,
+ linear_index: u64,
+ ptr: NonNull<u8>,
+ range: Range<u64>,
+ relevant: relevant::Relevant,
+}
+
+impl<B> std::fmt::Debug for LinearBlock<B>
+where
+ B: Backend,
+{
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ fmt.debug_struct("LinearBlock")
+ .field("memory", &*self.memory)
+ .field("linear_index", &self.linear_index)
+ .field("ptr", &self.ptr)
+ .field("range", &self.range)
+ .finish()
+ }
+}
+
+unsafe impl<B> Send for LinearBlock<B> where B: Backend {}
+unsafe impl<B> Sync for LinearBlock<B> where B: Backend {}
+
+impl<B> LinearBlock<B>
+where
+ B: Backend,
+{
+ fn size(&self) -> u64 {
+ self.range.end - self.range.start
+ }
+
+ fn dispose(self) {
+ self.relevant.dispose();
+ }
+}
+
+impl<B> Block<B> for LinearBlock<B>
+where
+ B: Backend,
+{
+ #[inline]
+ fn properties(&self) -> gfx_hal::memory::Properties {
+ self.memory.properties()
+ }
+
+ #[inline]
+ fn memory(&self) -> &B::Memory {
+ self.memory.raw()
+ }
+
+ #[inline]
+ fn range(&self) -> Range<u64> {
+ self.range.clone()
+ }
+
+ #[inline]
+ fn map<'a>(
+ &'a mut self,
+ _device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError> {
+ assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+
+ if !self.memory.host_visible() {
+ //TODO: invalid access error
+ return Err(gfx_hal::device::MapError::MappingFailed);
+ }
+
+ let requested_range = relative_to_sub_range(self.range.clone(), range)
+ .ok_or(gfx_hal::device::MapError::OutOfBounds)?;
+
+ let mapping_range = if !self.memory.host_coherent() {
+ align_range(
+ requested_range.clone(),
+ self.memory.non_coherent_atom_size(),
+ )
+ } else {
+ requested_range.clone()
+ };
+
+ let ptr = mapped_sub_range(self.ptr, self.range.clone(), mapping_range.clone()).unwrap();
+ let mapping =
+ unsafe { MappedRange::from_raw(&*self.memory, ptr, mapping_range, requested_range) };
+ Ok(mapping)
+ }
+
+ #[inline]
+ fn unmap(&mut self, _device: &B::Device) {
+ debug_assert!(self.memory.host_visible());
+ }
+}
+
+/// Config for `LinearAllocator`.
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct LinearConfig {
+ /// Size of the linear chunk.
+ /// Keep it big.
+ pub linear_size: u64,
+}
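+
+// A plausible value for the config above (an illustrative assumption, not a
+// recommended default):
+//
+//     let config = LinearConfig { linear_size: 16 * 1024 * 1024 };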
+
+/// Linear allocator that returns memory from a chunk sequentially.
+/// It keeps only the number of bytes allocated from each chunk.
+/// Once a chunk is exhausted it is placed into a list.
+/// When all blocks allocated from the head of that list are freed,
+/// the head is freed as well.
+///
+/// This allocator is best suited to short-lived allocations.
+/// The allocation strategy has minimal overhead and the implementation is fast.
+/// But holding a single block will completely stop memory recycling.
+#[derive(Debug)]
+pub struct LinearAllocator<B: Backend> {
+ memory_type: gfx_hal::MemoryTypeId,
+ memory_properties: gfx_hal::memory::Properties,
+ linear_size: u64,
+ offset: u64,
+ lines: VecDeque<Line<B>>,
+ non_coherent_atom_size: u64,
+}
+
+#[derive(Debug)]
+struct Line<B: Backend> {
+ used: u64,
+ free: u64,
+ memory: Arc<Memory<B>>,
+ ptr: NonNull<u8>,
+}
+
+unsafe impl<B> Send for Line<B> where B: Backend {}
+unsafe impl<B> Sync for Line<B> where B: Backend {}
+
+impl<B> LinearAllocator<B>
+where
+ B: Backend,
+{
+ /// Get properties required by the `LinearAllocator`.
+ pub fn properties_required() -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::CPU_VISIBLE
+ }
+
+ /// Maximum allocation size.
+ pub fn max_allocation(&self) -> u64 {
+ self.linear_size / 2
+ }
+
+ /// Create new `LinearAllocator`
+ /// for `memory_type` with `memory_properties` specified,
+ /// with `LinearConfig` provided.
+ pub fn new(
+ memory_type: gfx_hal::MemoryTypeId,
+ memory_properties: gfx_hal::memory::Properties,
+ config: LinearConfig,
+ non_coherent_atom_size: u64,
+ ) -> Self {
+ log::trace!(
+ "Create new 'linear' allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'",
+ memory_type,
+ memory_properties,
+ config
+ );
+ let linear_size = if is_non_coherent_visible(memory_properties) {
+ align_size(config.linear_size, non_coherent_atom_size)
+ } else {
+ config.linear_size
+ };
+ assert!(memory_properties.contains(Self::properties_required()));
+ assert!(
+ fits_usize(linear_size),
+ "Linear size must fit in both usize and u64"
+ );
+ LinearAllocator {
+ memory_type,
+ memory_properties,
+ linear_size,
+ offset: 0,
+ lines: VecDeque::new(),
+ non_coherent_atom_size,
+ }
+ }
+
+ /// Perform full cleanup of the memory allocated.
+ pub fn dispose(mut self, device: &B::Device) {
+ let _ = self.cleanup(device, 0);
+ if !self.lines.is_empty() {
+ log::error!(
+ "Lines are not empty during allocator disposal. Lines: {:#?}",
+ self.lines
+ );
+ }
+ }
+
+ fn cleanup(&mut self, device: &B::Device, off: usize) -> u64 {
+ let mut freed = 0;
+ while self.lines.len() > off {
+ if self.lines[0].used > self.lines[0].free {
+ break;
+ }
+
+ let line = self.lines.pop_front().unwrap();
+ self.offset += 1;
+
+ unsafe {
+ match Arc::try_unwrap(line.memory) {
+ Ok(mut memory) => {
+ // trace!("Unmap memory: {:#?}", line.memory);
+ device.unmap_memory(memory.raw_mut());
+
+ freed += memory.size();
+ device.free_memory(memory.into_raw());
+ }
+                    Err(_) => log::error!("Allocated `Line` was freed, but memory is still shared and will never be destroyed"),
+ }
+ }
+ }
+ freed
+ }
+}
+
+impl<B> Allocator<B> for LinearAllocator<B>
+where
+ B: Backend,
+{
+ type Block = LinearBlock<B>;
+
+ fn kind() -> Kind {
+ Kind::Linear
+ }
+
+ fn alloc(
+ &mut self,
+ device: &B::Device,
+ size: u64,
+ align: u64,
+ ) -> Result<(LinearBlock<B>, u64), gfx_hal::device::AllocationError> {
+ debug_assert!(self
+ .memory_properties
+ .contains(gfx_hal::memory::Properties::CPU_VISIBLE));
+
+ let (size, align) = if is_non_coherent_visible(self.memory_properties) {
+ (
+ align_size(size, self.non_coherent_atom_size),
+ align_size(align, self.non_coherent_atom_size),
+ )
+ } else {
+ (size, align)
+ };
+
+ assert!(size <= self.linear_size);
+ assert!(align <= self.linear_size);
+
+ let count = self.lines.len() as u64;
+ if let Some(line) = self.lines.back_mut() {
+ let aligned_offset = aligned(line.used, align);
+ let overhead = aligned_offset - line.used;
+ if self.linear_size - size > aligned_offset {
+ line.used = aligned_offset + size;
+ line.free += overhead;
+
+ let range = aligned_offset..aligned_offset + size;
+
+ let ptr = mapped_sub_range(line.ptr, 0..self.linear_size, range.clone())
+ .expect("This sub-range must fit in line mapping");
+
+ return Ok((
+ LinearBlock {
+ linear_index: self.offset + count - 1,
+ memory: line.memory.clone(),
+ ptr,
+ range,
+ relevant: relevant::Relevant,
+ },
+ 0,
+ ));
+ }
+ }
+
+ let (memory, ptr) = unsafe {
+ let mut raw = device.allocate_memory(self.memory_type, self.linear_size)?;
+
+ let ptr = match device.map_memory(
+ &mut raw,
+ gfx_hal::memory::Segment {
+ offset: 0,
+ size: Some(self.linear_size),
+ },
+ ) {
+ Ok(ptr) => NonNull::new_unchecked(ptr),
+ Err(gfx_hal::device::MapError::OutOfMemory(error)) => {
+ device.free_memory(raw);
+ return Err(error.into());
+ }
+ Err(_) => panic!("Unexpected mapping failure"),
+ };
+
+ let memory = Memory::from_raw(
+ raw,
+ self.linear_size,
+ self.memory_properties,
+ self.non_coherent_atom_size,
+ );
+
+ (memory, ptr)
+ };
+
+ let line = Line {
+ used: size,
+ free: 0,
+ ptr,
+ memory: Arc::new(memory),
+ };
+
+ let block = LinearBlock {
+ linear_index: self.offset + count,
+ memory: line.memory.clone(),
+ ptr,
+ range: 0..size,
+ relevant: relevant::Relevant,
+ };
+
+ self.lines.push_back(line);
+ Ok((block, self.linear_size))
+ }
+
+ fn free(&mut self, device: &B::Device, block: Self::Block) -> u64 {
+ let index = block.linear_index - self.offset;
+ assert!(
+ fits_usize(index),
+ "This can't exceed lines list length which fits into usize by definition"
+ );
+ let index = index as usize;
+ assert!(
+ index < self.lines.len(),
+ "Can't be allocated from not yet created line"
+ );
+ {
+ let line = &mut self.lines[index];
+ line.free += block.size();
+ }
+ block.dispose();
+
+ self.cleanup(device, 1)
+ }
+}
diff --git a/rendy-memory/src/allocator/mod.rs b/rendy-memory/src/allocator/mod.rs
new file mode 100644
index 0000000..770a8a8
--- /dev/null
+++ b/rendy-memory/src/allocator/mod.rs
@@ -0,0 +1,50 @@
+//! This module provides the `Allocator` trait and a few allocators that implement it.
+
+mod dedicated;
+mod dynamic;
+mod linear;
+
+use crate::block::Block;
+
+pub use self::{
+ dedicated::{DedicatedAllocator, DedicatedBlock},
+ dynamic::{DynamicAllocator, DynamicBlock, DynamicConfig},
+ linear::{LinearAllocator, LinearBlock, LinearConfig},
+};
+
+/// Allocator kind.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Kind {
+ /// Memory object per allocation.
+ Dedicated,
+
+ /// General purpose allocator.
+ Dynamic,
+
+ /// Allocates linearly.
+ /// Fast and low overhead.
+ /// Suitable for one-time-use allocations.
+ Linear,
+}
+
+/// Allocator trait implemented for various allocators.
+pub trait Allocator<B: gfx_hal::Backend> {
+ /// Block type returned by allocator.
+ type Block: Block<B>;
+
+ /// Get allocator kind.
+ fn kind() -> Kind;
+
+ /// Allocate block of memory.
+ /// On success returns allocated block and amount of memory consumed from device.
+ fn alloc(
+ &mut self,
+ device: &B::Device,
+ size: u64,
+ align: u64,
+ ) -> Result<(Self::Block, u64), gfx_hal::device::AllocationError>;
+
+ /// Free block of memory.
+ /// Returns amount of memory returned to the device.
+ fn free(&mut self, device: &B::Device, block: Self::Block) -> u64;
+}
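+
+// Sketch of the contract above (an illustrative assumption): `alloc` returns
+// the block plus the amount of device memory actually consumed, which may be
+// larger than `size` (a whole new chunk was allocated) or zero (the request
+// was served from an existing chunk).
+//
+//     let (block, consumed) = allocator.alloc(&device, 1024, 256)?;
+//     assert!(block.size() >= 1024);
+//     let returned = allocator.free(&device, block);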
diff --git a/rendy-memory/src/block.rs b/rendy-memory/src/block.rs
new file mode 100644
index 0000000..23ff4f4
--- /dev/null
+++ b/rendy-memory/src/block.rs
@@ -0,0 +1,36 @@
+use std::ops::Range;
+
+use crate::mapping::MappedRange;
+
+/// Block that owns a `Range` of the `Memory`.
+/// The implementor must ensure that there can't be any other blocks
+/// with an overlapping range (either through the type system or via safety notes on unsafe functions).
+/// Provides access to safe memory range mapping.
+pub trait Block<B: gfx_hal::Backend> {
+ /// Get memory properties of the block.
+ fn properties(&self) -> gfx_hal::memory::Properties;
+
+ /// Get raw memory object.
+ fn memory(&self) -> &B::Memory;
+
+ /// Get memory range owned by this block.
+ fn range(&self) -> Range<u64>;
+
+ /// Get size of the block.
+ fn size(&self) -> u64 {
+ let range = self.range();
+ range.end - range.start
+ }
+
+    /// Get mapping for the buffer range.
+    /// Memory writes to the region performed by the device become available to the host.
+ fn map<'a>(
+ &'a mut self,
+ device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError>;
+
+ /// Release memory mapping. Must be called after successful `map` call.
+ /// No-op if block is not mapped.
+ fn unmap(&mut self, device: &B::Device);
+}
diff --git a/rendy-memory/src/heaps/heap.rs b/rendy-memory/src/heaps/heap.rs
new file mode 100644
index 0000000..6595cbc
--- /dev/null
+++ b/rendy-memory/src/heaps/heap.rs
@@ -0,0 +1,49 @@
+use crate::utilization::*;
+
+#[derive(Debug)]
+pub(super) struct MemoryHeap {
+ size: u64,
+ used: u64,
+ effective: u64,
+}
+
+impl MemoryHeap {
+ pub(super) fn new(size: u64) -> Self {
+ MemoryHeap {
+ size,
+ used: 0,
+ effective: 0,
+ }
+ }
+
+ pub(super) fn available(&self) -> u64 {
+ if self.used > self.size {
+ log::warn!("Heap size exceeded");
+ 0
+ } else {
+ self.size - self.used
+ }
+ }
+
+ pub(super) fn allocated(&mut self, used: u64, effective: u64) {
+ self.used += used;
+ self.effective += effective;
+ debug_assert!(self.used >= self.effective);
+ }
+
+ pub(super) fn freed(&mut self, used: u64, effective: u64) {
+ self.used -= used;
+ self.effective -= effective;
+ debug_assert!(self.used >= self.effective);
+ }
+
+ pub(super) fn utilization(&self) -> MemoryHeapUtilization {
+ MemoryHeapUtilization {
+ utilization: MemoryUtilization {
+ used: self.used,
+ effective: self.effective,
+ },
+ size: self.size,
+ }
+ }
+}
diff --git a/rendy-memory/src/heaps/memory_type.rs b/rendy-memory/src/heaps/memory_type.rs
new file mode 100644
index 0000000..e3c2e72
--- /dev/null
+++ b/rendy-memory/src/heaps/memory_type.rs
@@ -0,0 +1,158 @@
+use {
+ super::{BlockFlavor, HeapsConfig},
+ crate::{allocator::*, usage::MemoryUsage, utilization::*},
+ gfx_hal::memory::Properties,
+};
+
+#[derive(Debug)]
+pub(super) struct MemoryType<B: gfx_hal::Backend> {
+ heap_index: usize,
+ properties: Properties,
+ dedicated: DedicatedAllocator,
+ linear: Option<LinearAllocator<B>>,
+ dynamic: Option<DynamicAllocator<B>>,
+ // chunk: Option<ChunkAllocator>,
+ used: u64,
+ effective: u64,
+}
+
+impl<B> MemoryType<B>
+where
+ B: gfx_hal::Backend,
+{
+ pub(super) fn new(
+ memory_type: gfx_hal::MemoryTypeId,
+ heap_index: usize,
+ properties: Properties,
+ config: HeapsConfig,
+ non_coherent_atom_size: u64,
+ ) -> Self {
+ MemoryType {
+ properties,
+ heap_index,
+ dedicated: DedicatedAllocator::new(memory_type, properties, non_coherent_atom_size),
+ linear: if properties.contains(Properties::CPU_VISIBLE) {
+ config.linear.map(|config| {
+ LinearAllocator::new(memory_type, properties, config, non_coherent_atom_size)
+ })
+ } else {
+ None
+ },
+ dynamic: config.dynamic.map(|config| {
+ DynamicAllocator::new(memory_type, properties, config, non_coherent_atom_size)
+ }),
+ used: 0,
+ effective: 0,
+ }
+ }
+
+ pub(super) fn properties(&self) -> Properties {
+ self.properties
+ }
+
+ pub(super) fn heap_index(&self) -> usize {
+ self.heap_index
+ }
+
+ pub(super) fn alloc(
+ &mut self,
+ device: &B::Device,
+ usage: impl MemoryUsage,
+ size: u64,
+ align: u64,
+ ) -> Result<(BlockFlavor<B>, u64), gfx_hal::device::AllocationError> {
+ let (block, allocated) = self.alloc_impl(device, usage, size, align)?;
+ self.effective += block.size();
+ self.used += allocated;
+ Ok((block, allocated))
+ }
+
+ fn alloc_impl(
+ &mut self,
+ device: &B::Device,
+ usage: impl MemoryUsage,
+ size: u64,
+ align: u64,
+ ) -> Result<(BlockFlavor<B>, u64), gfx_hal::device::AllocationError> {
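+        // Pick the best-fitting sub-allocator for this usage that can satisfy
+        // `size`; fall back to a dedicated allocation when neither the dynamic
+        // nor the linear allocator is configured or fits.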
+ match (self.dynamic.as_mut(), self.linear.as_mut()) {
+ (Some(dynamic), Some(linear)) => {
+ if dynamic.max_allocation() >= size
+ && usage.allocator_fitness(Kind::Dynamic)
+ > usage.allocator_fitness(Kind::Linear)
+ {
+ dynamic
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dynamic(block), size))
+ } else if linear.max_allocation() >= size
+ && usage.allocator_fitness(Kind::Linear) > 0
+ {
+ linear
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Linear(block), size))
+ } else {
+ self.dedicated
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dedicated(block), size))
+ }
+ }
+ (Some(dynamic), None) => {
+ if dynamic.max_allocation() >= size && usage.allocator_fitness(Kind::Dynamic) > 0 {
+ dynamic
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dynamic(block), size))
+ } else {
+ self.dedicated
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dedicated(block), size))
+ }
+ }
+ (None, Some(linear)) => {
+ if linear.max_allocation() >= size && usage.allocator_fitness(Kind::Linear) > 0 {
+ linear
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Linear(block), size))
+ } else {
+ self.dedicated
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dedicated(block), size))
+ }
+ }
+ (None, None) => self
+ .dedicated
+ .alloc(device, size, align)
+ .map(|(block, size)| (BlockFlavor::Dedicated(block), size)),
+ }
+ }
+
+ pub(super) fn free(&mut self, device: &B::Device, block: BlockFlavor<B>) -> u64 {
+ match block {
+ BlockFlavor::Dedicated(block) => self.dedicated.free(device, block),
+ BlockFlavor::Linear(block) => self.linear.as_mut().unwrap().free(device, block),
+ BlockFlavor::Dynamic(block) => self.dynamic.as_mut().unwrap().free(device, block),
+ }
+ }
+
+ pub(super) fn dispose(self, device: &B::Device) {
+ log::trace!("Dispose memory allocators");
+
+ if let Some(linear) = self.linear {
+ linear.dispose(device);
+ log::trace!("Linear allocator disposed");
+ }
+ if let Some(dynamic) = self.dynamic {
+ dynamic.dispose();
+ log::trace!("Dynamic allocator disposed");
+ }
+ }
+
+ pub(super) fn utilization(&self) -> MemoryTypeUtilization {
+ MemoryTypeUtilization {
+ utilization: MemoryUtilization {
+ used: self.used,
+ effective: self.effective,
+ },
+ properties: self.properties,
+ heap_index: self.heap_index,
+ }
+ }
+}
diff --git a/rendy-memory/src/heaps/mod.rs b/rendy-memory/src/heaps/mod.rs
new file mode 100644
index 0000000..0dd7983
--- /dev/null
+++ b/rendy-memory/src/heaps/mod.rs
@@ -0,0 +1,327 @@
+mod heap;
+mod memory_type;
+
+use {
+ self::{heap::MemoryHeap, memory_type::MemoryType},
+ crate::{allocator::*, block::Block, mapping::*, usage::MemoryUsage, util::*, utilization::*},
+ std::ops::Range,
+};
+
+/// Possible errors returned by `Heaps`.
+#[allow(missing_copy_implementations)]
+#[derive(Clone, Debug, PartialEq)]
+pub enum HeapsError {
+ /// Memory allocation failure.
+ AllocationError(gfx_hal::device::AllocationError),
+    /// No memory type among those required for the resource was found with the requested properties.
+ NoSuitableMemory(u32, gfx_hal::memory::Properties),
+}
+
+impl std::fmt::Display for HeapsError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ HeapsError::AllocationError(e) => write!(f, "{:?}", e),
+            HeapsError::NoSuitableMemory(mask, properties) => write!(
+                f,
+                "No memory type among mask ({}) with properties ({:?}) was found",
+                mask, properties
+            ),
+ }
+ }
+}
+impl std::error::Error for HeapsError {}
+
+impl From<gfx_hal::device::AllocationError> for HeapsError {
+ fn from(error: gfx_hal::device::AllocationError) -> Self {
+ HeapsError::AllocationError(error)
+ }
+}
+
+impl From<gfx_hal::device::OutOfMemory> for HeapsError {
+ fn from(error: gfx_hal::device::OutOfMemory) -> Self {
+ HeapsError::AllocationError(error.into())
+ }
+}
+
+/// Config for `Heaps` allocator.
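+///
+/// # Example
+///
+/// An illustrative config; the values here are arbitrary, and the field
+/// names are those of the `LinearConfig`/`DynamicConfig` types re-exported
+/// by this crate:
+///
+/// ```ignore
+/// let config = HeapsConfig {
+///     linear: Some(LinearConfig {
+///         linear_size: 128 * 1024 * 1024,
+///     }),
+///     dynamic: Some(DynamicConfig {
+///         block_size_granularity: 256,
+///         max_chunk_size: 32 * 1024 * 1024,
+///         min_device_allocation: 64 * 1024,
+///     }),
+/// };
+/// ```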
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct HeapsConfig {
+ /// Config for linear sub-allocator.
+ pub linear: Option<LinearConfig>,
+
+ /// Config for dynamic sub-allocator.
+ pub dynamic: Option<DynamicConfig>,
+}
+
+/// Heaps available on particular physical device.
+#[derive(Debug)]
+pub struct Heaps<B: gfx_hal::Backend> {
+ types: Vec<MemoryType<B>>,
+ heaps: Vec<MemoryHeap>,
+}
+
+impl<B> Heaps<B>
+where
+ B: gfx_hal::Backend,
+{
+    /// This must be called with `gfx_hal::memory::Properties` fetched from the physical device.
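+    ///
+    /// # Safety
+    ///
+    /// `types` and `heaps` must accurately describe the memory types and heap
+    /// sizes of the physical device this `Heaps` will be used with.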
+ pub unsafe fn new<P, H>(types: P, heaps: H, non_coherent_atom_size: u64) -> Self
+ where
+ P: IntoIterator<Item = (gfx_hal::memory::Properties, u32, HeapsConfig)>,
+ H: IntoIterator<Item = u64>,
+ {
+ let heaps = heaps.into_iter().map(MemoryHeap::new).collect::<Vec<_>>();
+ Heaps {
+ types: types
+ .into_iter()
+ .enumerate()
+ .map(|(index, (properties, heap_index, config))| {
+                        assert!(
+                            fits_u32(index),
+                            "Memory type index must fit in u32 limit"
+                        );
+                        assert!(
+                            fits_usize(heap_index),
+                            "Heap index must fit in usize limit"
+                        );
+ let memory_type = gfx_hal::MemoryTypeId(index);
+ let heap_index = heap_index as usize;
+ assert!(heap_index < heaps.len());
+ MemoryType::new(
+ memory_type,
+ heap_index,
+ properties,
+ config,
+ non_coherent_atom_size,
+ )
+ })
+ .collect(),
+ heaps,
+ }
+ }
+
+    /// Allocate a memory block from one of the memory types specified by
+    /// `mask`, for the intended `usage`, with `size` and `align` requirements.
+ pub fn allocate(
+ &mut self,
+ device: &B::Device,
+ mask: u32,
+ usage: impl MemoryUsage,
+ size: u64,
+ align: u64,
+ ) -> Result<MemoryBlock<B>, HeapsError> {
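+        // `mask` is a bitmask of allowed memory type indices, as produced by
+        // e.g. `Requirements::type_mask`; bit `i` set means type `i` is allowed.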
+ debug_assert!(fits_u32(self.types.len()));
+
+ let (memory_index, _, _) = {
+ let suitable_types = self
+ .types
+ .iter()
+ .enumerate()
+ .filter(|(index, _)| (mask & (1u32 << index)) != 0)
+ .filter_map(|(index, mt)| {
+ if mt.properties().contains(usage.properties_required()) {
+ let fitness = usage.memory_fitness(mt.properties());
+ Some((index, mt, fitness))
+ } else {
+ None
+ }
+ })
+ .collect::<smallvec::SmallVec<[_; 64]>>();
+
+ if suitable_types.is_empty() {
+ return Err(HeapsError::NoSuitableMemory(
+ mask,
+ usage.properties_required(),
+ ));
+ }
+
+ suitable_types
+ .into_iter()
+ .filter(|(_, mt, _)| self.heaps[mt.heap_index()].available() > size + align)
+ .max_by_key(|&(_, _, fitness)| fitness)
+ .ok_or_else(|| {
+ log::error!("All suitable heaps are exhausted. {:#?}", self);
+ gfx_hal::device::OutOfMemory::Device
+ })?
+ };
+
+ self.allocate_from(device, memory_index as u32, usage, size, align)
+ }
+
+    /// Allocate a memory block from the specified `memory_index`, for the
+    /// intended `usage`, with `size` and `align` requirements.
+ fn allocate_from(
+ &mut self,
+ device: &B::Device,
+ memory_index: u32,
+ usage: impl MemoryUsage,
+ size: u64,
+ align: u64,
+ ) -> Result<MemoryBlock<B>, HeapsError> {
+ log::trace!(
+ "Allocate memory block: type '{}', usage '{:#?}', size: '{}', align: '{}'",
+ memory_index,
+ usage,
+ size,
+ align
+ );
+ assert!(fits_usize(memory_index));
+
+ let memory_type = &mut self.types[memory_index as usize];
+ let memory_heap = &mut self.heaps[memory_type.heap_index()];
+
+ if memory_heap.available() < size {
+ return Err(gfx_hal::device::OutOfMemory::Device.into());
+ }
+
+ let (block, allocated) = memory_type.alloc(device, usage, size, align)?;
+ memory_heap.allocated(allocated, block.size());
+
+ Ok(MemoryBlock {
+ block,
+ memory_index,
+ })
+ }
+
+ /// Free memory block.
+ ///
+ /// Memory block must be allocated from this heap.
+ pub fn free(&mut self, device: &B::Device, block: MemoryBlock<B>) {
+ // trace!("Free block '{:#?}'", block);
+ let memory_index = block.memory_index;
+ debug_assert!(fits_usize(memory_index));
+ let size = block.size();
+
+ let memory_type = &mut self.types[memory_index as usize];
+ let memory_heap = &mut self.heaps[memory_type.heap_index()];
+ let freed = memory_type.free(device, block.block);
+ memory_heap.freed(freed, size);
+ }
+
+    /// Dispose of the allocator, cleaning up its sub-allocators.
+    /// Call this before dropping.
+    /// Panics if any memory blocks are still allocated.
+ pub fn dispose(self, device: &B::Device) {
+ for mt in self.types {
+ mt.dispose(device)
+ }
+ }
+
+ /// Get memory utilization.
+ pub fn utilization(&self) -> TotalMemoryUtilization {
+ TotalMemoryUtilization {
+ heaps: self.heaps.iter().map(MemoryHeap::utilization).collect(),
+ types: self.types.iter().map(MemoryType::utilization).collect(),
+ }
+ }
+}
+
+/// Memory block allocated from `Heaps`.
+#[derive(Debug)]
+pub struct MemoryBlock<B: gfx_hal::Backend> {
+ block: BlockFlavor<B>,
+ memory_index: u32,
+}
+
+impl<B> MemoryBlock<B>
+where
+ B: gfx_hal::Backend,
+{
+ /// Get memory type id.
+ pub fn memory_type(&self) -> u32 {
+ self.memory_index
+ }
+}
+
+#[derive(Debug)]
+enum BlockFlavor<B: gfx_hal::Backend> {
+ Dedicated(DedicatedBlock<B>),
+ Linear(LinearBlock<B>),
+ Dynamic(DynamicBlock<B>),
+ // Chunk(ChunkBlock<B>),
+}
+
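+// Dispatches `$expr` with `$block` bound to the contents of whichever
+// `BlockFlavor` variant is active; the three arms differ only in whether
+// the flavor is matched by value, by reference, or by mutable reference.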
+macro_rules! any_block {
+ ($self:ident. $block:ident => $expr:expr) => {{
+ use self::BlockFlavor::*;
+ match $self.$block {
+ Dedicated($block) => $expr,
+ Linear($block) => $expr,
+ Dynamic($block) => $expr,
+ // Chunk($block) => $expr,
+ }
+ }};
+ (& $self:ident. $block:ident => $expr:expr) => {{
+ use self::BlockFlavor::*;
+ match &$self.$block {
+ Dedicated($block) => $expr,
+ Linear($block) => $expr,
+ Dynamic($block) => $expr,
+ // Chunk($block) => $expr,
+ }
+ }};
+ (&mut $self:ident. $block:ident => $expr:expr) => {{
+ use self::BlockFlavor::*;
+ match &mut $self.$block {
+ Dedicated($block) => $expr,
+ Linear($block) => $expr,
+ Dynamic($block) => $expr,
+ // Chunk($block) => $expr,
+ }
+ }};
+}
+
+impl<B> BlockFlavor<B>
+where
+ B: gfx_hal::Backend,
+{
+ #[inline]
+ fn size(&self) -> u64 {
+ use self::BlockFlavor::*;
+ match self {
+ Dedicated(block) => block.size(),
+ Linear(block) => block.size(),
+ Dynamic(block) => block.size(),
+ // Chunk(block) => block.size(),
+ }
+ }
+}
+
+impl<B> Block<B> for MemoryBlock<B>
+where
+ B: gfx_hal::Backend,
+{
+ #[inline]
+ fn properties(&self) -> gfx_hal::memory::Properties {
+ any_block!(&self.block => block.properties())
+ }
+
+ #[inline]
+ fn memory(&self) -> &B::Memory {
+ any_block!(&self.block => block.memory())
+ }
+
+ #[inline]
+ fn range(&self) -> Range<u64> {
+ any_block!(&self.block => block.range())
+ }
+
+ fn map<'a>(
+ &'a mut self,
+ device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError> {
+ any_block!(&mut self.block => block.map(device, range))
+ }
+
+ fn unmap(&mut self, device: &B::Device) {
+ any_block!(&mut self.block => block.unmap(device))
+ }
+}
diff --git a/rendy-memory/src/lib.rs b/rendy-memory/src/lib.rs
new file mode 100644
index 0000000..a0653ee
--- /dev/null
+++ b/rendy-memory/src/lib.rs
@@ -0,0 +1,31 @@
+//! GPU memory management
+//!
+
+#![warn(
+ missing_debug_implementations,
+ missing_copy_implementations,
+ missing_docs,
+ trivial_casts,
+ trivial_numeric_casts,
+ unused_extern_crates,
+ unused_import_braces,
+ unused_qualifications
+)]
+mod allocator;
+mod block;
+mod heaps;
+mod mapping;
+mod memory;
+mod usage;
+mod util;
+mod utilization;
+
+pub use crate::{
+ allocator::*,
+ block::Block,
+ heaps::{Heaps, HeapsConfig, HeapsError, MemoryBlock},
+ mapping::{write::Write, Coherent, MappedRange, MaybeCoherent, NonCoherent},
+ memory::Memory,
+ usage::*,
+ utilization::*,
+};
diff --git a/rendy-memory/src/mapping/mod.rs b/rendy-memory/src/mapping/mod.rs
new file mode 100644
index 0000000..63b2f34
--- /dev/null
+++ b/rendy-memory/src/mapping/mod.rs
@@ -0,0 +1,345 @@
+mod range;
+pub(crate) mod write;
+
+use {
+ crate::{memory::Memory, util::*},
+ gfx_hal::{device::Device as _, Backend},
+ std::{ops::Range, ptr::NonNull},
+};
+
+pub(crate) use self::range::*;
+use self::write::{Write, WriteCoherent, WriteFlush};
+
+/// Non-coherent marker.
+#[derive(Clone, Copy, Debug)]
+pub struct NonCoherent;
+
+/// Coherent marker.
+#[derive(Clone, Copy, Debug)]
+pub struct Coherent;
+
+/// Marker value that is either coherent or non-coherent, determined at runtime.
+#[derive(Clone, Copy, Debug)]
+pub struct MaybeCoherent(bool);
+
+/// Represents a range of the memory mapped to the host.
+/// Provides methods for safer host access to the memory.
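+///
+/// # Example
+///
+/// A sketch of writing through a mapped range, assuming `block` implements
+/// `Block<B>`, maps host-visible memory, and `data` is a `&[u32]`:
+///
+/// ```ignore
+/// let mut mapped = block.map(device, 0..1024)?;
+/// let mut writer = unsafe { mapped.write::<u32>(device, 0..256)? };
+/// writer.write(data); // flushed for non-coherent memory when dropped
+/// ```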
+#[derive(Debug)]
+pub struct MappedRange<'a, B: Backend, C = MaybeCoherent> {
+ /// Memory object that is mapped.
+ memory: &'a Memory<B>,
+
+ /// Pointer to range mapped memory.
+ ptr: NonNull<u8>,
+
+ /// Range of mapped memory.
+ mapping_range: Range<u64>,
+
+ /// Mapping range requested by caller.
+ /// Must be subrange of `mapping_range`.
+ requested_range: Range<u64>,
+
+ /// Coherency marker
+ coherent: C,
+}
+
+impl<'a, B> MappedRange<'a, B>
+where
+ B: Backend,
+{
+ // /// Map range of memory.
+ // /// `range` is in memory object space.
+ // ///
+ // /// # Safety
+ // ///
+ // /// * Only one range for the given memory object can be mapped.
+ // /// * Memory object must be not mapped.
+ // /// * Memory object must be created with device specified.
+ // pub unsafe fn new(
+ // memory: &'a Memory<B>,
+ // device: &B::Device,
+ // range: Range<u64>,
+ // ) -> Result<Self, gfx_hal::device::MapError> {
+ // assert!(
+ // range.start < range.end,
+ // "Memory mapping region must have valid size"
+ // );
+ // assert!(
+ // fits_usize(range.end - range.start),
+ // "Range length must fit in usize"
+ // );
+ // assert!(memory.host_visible());
+
+ // let ptr = device.map_memory(memory.raw(), range.clone())?;
+ // assert!(
+ // (ptr as usize).wrapping_neg() >= (range.end - range.start) as usize,
+ // "Resulting pointer value + range length must fit in usize. Pointer: {:p}, range {:?}",
+ // ptr,
+ // range,
+ // );
+
+ // Ok(Self::from_raw(memory, NonNull::new_unchecked(ptr), range))
+ // }
+
+    /// Construct a mapped range from a raw mapping.
+    ///
+    /// # Safety
+    ///
+    /// The `mapping_range` of `memory` must be mapped to the host memory
+    /// region pointed to by `ptr`. Both ranges are in memory object space,
+    /// and `ptr` points to the `mapping_range.start` offset from the memory origin.
+ pub(crate) unsafe fn from_raw(
+ memory: &'a Memory<B>,
+ ptr: NonNull<u8>,
+ mapping_range: Range<u64>,
+ requested_range: Range<u64>,
+ ) -> Self {
+ debug_assert!(
+ mapping_range.start < mapping_range.end,
+ "Memory mapping region must have valid size"
+ );
+
+ debug_assert!(
+ requested_range.start < requested_range.end,
+ "Memory mapping region must have valid size"
+ );
+
+ if !memory.host_coherent() {
+ debug_assert_eq!(mapping_range.start % memory.non_coherent_atom_size(), 0, "Bounds of non-coherent memory mapping ranges must be multiple of `Limits::non_coherent_atom_size`");
+ debug_assert_eq!(mapping_range.end % memory.non_coherent_atom_size(), 0, "Bounds of non-coherent memory mapping ranges must be multiple of `Limits::non_coherent_atom_size`");
+ debug_assert!(
+ is_sub_range(mapping_range.clone(), requested_range.clone()),
+ "`requested_range` must be sub-range of `mapping_range`",
+ );
+ } else {
+ debug_assert_eq!(mapping_range, requested_range);
+ }
+
+ MappedRange {
+ ptr,
+ mapping_range,
+ requested_range,
+ memory,
+ coherent: MaybeCoherent(memory.host_coherent()),
+ }
+ }
+
+ /// Get pointer to beginning of memory region.
+ /// i.e. to `range().start` offset from memory origin.
+ pub fn ptr(&self) -> NonNull<u8> {
+ mapped_sub_range(
+ self.ptr,
+ self.mapping_range.clone(),
+ self.requested_range.clone(),
+ )
+ .unwrap()
+ }
+
+ /// Get mapped range.
+ pub fn range(&self) -> Range<u64> {
+ self.requested_range.clone()
+ }
+
+    /// Fetch a readable slice of the sub-range to be read,
+    /// invalidating the range first if the memory is not coherent.
+    /// `range.end - range.start` must be a multiple of `size_of::<T>()`.
+    /// `mapping offset + range.start` must be a multiple of `align_of::<T>()`.
+ ///
+ /// # Safety
+ ///
+ /// * Caller must ensure that device won't write to the memory region until the borrowing ends.
+ /// * `T` Must be plain-old-data type compatible with data in mapped region.
+ pub unsafe fn read<'b, T>(
+ &'b mut self,
+ device: &B::Device,
+ range: Range<u64>,
+ ) -> Result<&'b [T], gfx_hal::device::MapError>
+ where
+ 'a: 'b,
+ T: Copy,
+ {
+ debug_assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+ debug_assert!(
+ fits_usize(range.end - range.start),
+ "Range length must fit in usize"
+ );
+
+ let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
+ .ok_or(gfx_hal::device::MapError::OutOfBounds)?;
+
+ let ptr =
+ mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();
+
+ let size = (sub_range.end - sub_range.start) as usize;
+
+ if !self.coherent.0 {
+ let aligned_sub_range = align_range(sub_range, self.memory.non_coherent_atom_size());
+ debug_assert!(is_sub_range(
+ self.mapping_range.clone(),
+ aligned_sub_range.clone()
+ ));
+ device.invalidate_mapped_memory_ranges(std::iter::once((
+ self.memory.raw(),
+ gfx_hal::memory::Segment {
+ offset: aligned_sub_range.start,
+ size: Some(aligned_sub_range.end - aligned_sub_range.start),
+ },
+ )))?;
+ }
+
+ let slice = mapped_slice::<T>(ptr, size);
+ Ok(slice)
+ }
+
+    /// Fetch a writer for the sub-region.
+    /// For non-coherent memory, the writer flushes the range when dropped.
+ ///
+ /// # Safety
+ ///
+ /// * Caller must ensure that device won't write to or read from the memory region.
+ pub unsafe fn write<'b, T: 'b>(
+ &'b mut self,
+ device: &'b B::Device,
+ range: Range<u64>,
+ ) -> Result<impl Write<T> + 'b, gfx_hal::device::MapError>
+ where
+ 'a: 'b,
+ T: Copy,
+ {
+ assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+ assert!(
+ fits_usize(range.end - range.start),
+ "Range length must fit in usize"
+ );
+
+ let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
+ .ok_or(gfx_hal::device::MapError::OutOfBounds)?;
+
+ let ptr =
+ mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();
+
+ let size = (sub_range.end - sub_range.start) as usize;
+
+ let slice = mapped_slice_mut::<T>(ptr, size);
+
+ let memory = &self.memory;
+ let flush = if !self.coherent.0 {
+ let aligned_sub_range = align_range(sub_range, self.memory.non_coherent_atom_size());
+ debug_assert!(is_sub_range(
+ self.mapping_range.clone(),
+ aligned_sub_range.clone()
+ ));
+ Some(move || {
+ device
+ .flush_mapped_memory_ranges(std::iter::once((
+ memory.raw(),
+ gfx_hal::memory::Segment {
+ offset: aligned_sub_range.start,
+ size: Some(aligned_sub_range.end - aligned_sub_range.start),
+ },
+ )))
+ .expect("Should flush successfully");
+ })
+ } else {
+ None
+ };
+
+ Ok(WriteFlush { slice, flush })
+ }
+
+ /// Convert into mapped range with statically known coherency.
+ pub fn coherent(self) -> Result<MappedRange<'a, B, Coherent>, MappedRange<'a, B, NonCoherent>> {
+ if self.coherent.0 {
+ Ok(MappedRange {
+ memory: self.memory,
+ ptr: self.ptr,
+ mapping_range: self.mapping_range,
+ requested_range: self.requested_range,
+ coherent: Coherent,
+ })
+ } else {
+ Err(MappedRange {
+ memory: self.memory,
+ ptr: self.ptr,
+ mapping_range: self.mapping_range,
+ requested_range: self.requested_range,
+ coherent: NonCoherent,
+ })
+ }
+ }
+}
+
+impl<'a, B> From<MappedRange<'a, B, Coherent>> for MappedRange<'a, B>
+where
+ B: Backend,
+{
+ fn from(range: MappedRange<'a, B, Coherent>) -> Self {
+ MappedRange {
+ memory: range.memory,
+ ptr: range.ptr,
+ mapping_range: range.mapping_range,
+ requested_range: range.requested_range,
+ coherent: MaybeCoherent(true),
+ }
+ }
+}
+
+impl<'a, B> From<MappedRange<'a, B, NonCoherent>> for MappedRange<'a, B>
+where
+ B: Backend,
+{
+ fn from(range: MappedRange<'a, B, NonCoherent>) -> Self {
+ MappedRange {
+ memory: range.memory,
+ ptr: range.ptr,
+ mapping_range: range.mapping_range,
+ requested_range: range.requested_range,
+ coherent: MaybeCoherent(false),
+ }
+ }
+}
+
+impl<'a, B> MappedRange<'a, B, Coherent>
+where
+ B: Backend,
+{
+    /// Fetch a writer for the sub-region.
+ ///
+ /// # Safety
+ ///
+ /// * Caller must ensure that device won't write to or read from the memory region.
+ pub unsafe fn write<'b, U: 'b>(
+ &'b mut self,
+ range: Range<u64>,
+ ) -> Result<impl Write<U> + 'b, gfx_hal::device::MapError>
+ where
+ U: Copy,
+ {
+ assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+ assert!(
+ fits_usize(range.end - range.start),
+ "Range length must fit in usize"
+ );
+
+ let sub_range = relative_to_sub_range(self.requested_range.clone(), range)
+ .ok_or(gfx_hal::device::MapError::OutOfBounds)?;
+
+ let ptr =
+ mapped_sub_range(self.ptr, self.mapping_range.clone(), sub_range.clone()).unwrap();
+
+ let size = (sub_range.end - sub_range.start) as usize;
+
+ let slice = mapped_slice_mut::<U>(ptr, size);
+
+ Ok(WriteCoherent { slice })
+ }
+}
diff --git a/rendy-memory/src/mapping/range.rs b/rendy-memory/src/mapping/range.rs
new file mode 100644
index 0000000..f4c49be
--- /dev/null
+++ b/rendy-memory/src/mapping/range.rs
@@ -0,0 +1,85 @@
+use {
+ crate::util::fits_usize,
+ std::{
+ mem::{align_of, size_of},
+ ops::Range,
+ ptr::NonNull,
+ slice::{from_raw_parts, from_raw_parts_mut},
+ },
+};
+
+/// Get a sub-range of a memory mapping.
+/// `range` and `fitting` are in memory object space.
+/// `ptr` points to the `range.start` offset from the memory origin.
+/// Returns a pointer to the `fitting.start` offset from the memory origin
+/// if `fitting` is contained in `range`.
+pub(crate) fn mapped_sub_range(
+ ptr: NonNull<u8>,
+ range: Range<u64>,
+ fitting: Range<u64>,
+) -> Option<NonNull<u8>> {
+ assert!(
+ range.start < range.end,
+ "Memory mapping region must have valid size"
+ );
+ assert!(
+ fitting.start < fitting.end,
+ "Memory mapping region must have valid size"
+ );
+ assert!(fits_usize(range.end - range.start));
+ assert!(usize::max_value() - (range.end - range.start) as usize >= ptr.as_ptr() as usize);
+
+ if fitting.start < range.start || fitting.end > range.end {
+ None
+ } else {
+ Some(unsafe {
+ // for x > 0 and y >= 0: x + y > 0. No overflow due to checks above.
+ NonNull::new_unchecked(
+ (ptr.as_ptr() as usize + (fitting.start - range.start) as usize) as *mut u8,
+ )
+ })
+ }
+}
+
+/// # Safety
+///
+/// User must ensure that:
+/// * this function won't create aliasing slices.
+/// * returned slice doesn't outlive mapping.
+/// * `T` Must be plain-old-data type compatible with data in mapped region.
+pub(crate) unsafe fn mapped_slice_mut<'a, T>(ptr: NonNull<u8>, size: usize) -> &'a mut [T] {
+ assert_eq!(
+ size % size_of::<T>(),
+ 0,
+ "Range length must be multiple of element size"
+ );
+ let offset = ptr.as_ptr() as usize;
+ assert_eq!(
+ offset % align_of::<T>(),
+ 0,
+ "Range offset must be multiple of element alignment"
+ );
+ assert!(usize::max_value() - size >= ptr.as_ptr() as usize);
+    // `size` is in bytes (asserted above); the slice length is an element count.
+    from_raw_parts_mut(ptr.as_ptr() as *mut T, size / size_of::<T>())
+}
+
+/// # Safety
+///
+/// User must ensure that:
+/// * returned slice doesn't outlive mapping.
+/// * `T` Must be plain-old-data type compatible with data in mapped region.
+pub(crate) unsafe fn mapped_slice<'a, T>(ptr: NonNull<u8>, size: usize) -> &'a [T] {
+ assert_eq!(
+ size % size_of::<T>(),
+ 0,
+ "Range length must be multiple of element size"
+ );
+ let offset = ptr.as_ptr() as usize;
+ assert_eq!(
+ offset % align_of::<T>(),
+ 0,
+ "Range offset must be multiple of element alignment"
+ );
+ assert!(usize::max_value() - size >= ptr.as_ptr() as usize);
+    // `size` is in bytes (asserted above); the slice length is an element count.
+    from_raw_parts(ptr.as_ptr() as *const T, size / size_of::<T>())
+}
diff --git a/rendy-memory/src/mapping/write.rs b/rendy-memory/src/mapping/write.rs
new file mode 100644
index 0000000..d067a61
--- /dev/null
+++ b/rendy-memory/src/mapping/write.rs
@@ -0,0 +1,73 @@
+use std::ptr::copy_nonoverlapping;
+
+/// Trait for memory region suitable for host writes.
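+///
+/// # Example
+///
+/// An illustrative use, assuming `writer` was obtained from
+/// `MappedRange::write`:
+///
+/// ```ignore
+/// writer.write(&[0u32, 1, 2, 3]); // must not exceed the sub-region length
+/// ```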
+pub trait Write<T: Copy> {
+ /// Get mutable slice of `T` bound to mapped range.
+ ///
+ /// # Safety
+ ///
+ /// * Returned slice should not be read.
+ unsafe fn slice(&mut self) -> &mut [T];
+
+ /// Write data into mapped memory sub-region.
+ ///
+    /// # Panics
+    ///
+    /// Panics if `data.len()` is greater than this sub-region's length.
+ fn write(&mut self, data: &[T]) {
+ unsafe {
+ let slice = self.slice();
+ assert!(data.len() <= slice.len());
+ copy_nonoverlapping(data.as_ptr(), slice.as_mut_ptr(), data.len());
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(super) struct WriteFlush<'a, T, F: FnOnce() + 'a> {
+ pub(super) slice: &'a mut [T],
+ pub(super) flush: Option<F>,
+}
+
+impl<'a, T, F> Drop for WriteFlush<'a, T, F>
+where
+ T: 'a,
+ F: FnOnce() + 'a,
+{
+ fn drop(&mut self) {
+ if let Some(f) = self.flush.take() {
+ f();
+ }
+ }
+}
+
+impl<'a, T, F> Write<T> for WriteFlush<'a, T, F>
+where
+ T: Copy + 'a,
+ F: FnOnce() + 'a,
+{
+ /// # Safety
+ ///
+ /// [See doc comment for trait method](trait.Write#method.slice)
+ unsafe fn slice(&mut self) -> &mut [T] {
+ self.slice
+ }
+}
+
+#[warn(dead_code)]
+#[derive(Debug)]
+pub(super) struct WriteCoherent<'a, T> {
+ pub(super) slice: &'a mut [T],
+}
+
+impl<'a, T> Write<T> for WriteCoherent<'a, T>
+where
+ T: Copy + 'a,
+{
+ /// # Safety
+ ///
+ /// [See doc comment for trait method](trait.Write#method.slice)
+ unsafe fn slice(&mut self) -> &mut [T] {
+ self.slice
+ }
+}
diff --git a/rendy-memory/src/memory.rs b/rendy-memory/src/memory.rs
new file mode 100644
index 0000000..a529efe
--- /dev/null
+++ b/rendy-memory/src/memory.rs
@@ -0,0 +1,98 @@
+// use std::fmt;
+
+/// Memory object wrapper.
+/// Contains size and properties of the memory.
+#[derive(Debug)]
+pub struct Memory<B: gfx_hal::Backend> {
+ raw: B::Memory,
+ size: u64,
+ properties: gfx_hal::memory::Properties,
+ non_coherent_atom_size: u64,
+ relevant: relevant::Relevant,
+}
+
+impl<B> Memory<B>
+where
+ B: gfx_hal::Backend,
+{
+ /// Get memory properties.
+ pub fn properties(&self) -> gfx_hal::memory::Properties {
+ self.properties
+ }
+
+ /// Get memory size.
+ pub fn size(&self) -> u64 {
+ self.size
+ }
+
+ /// Get raw memory.
+ pub fn raw(&self) -> &B::Memory {
+ &self.raw
+ }
+
+ /// Get raw memory mutably.
+ pub fn raw_mut(&mut self) -> &mut B::Memory {
+ &mut self.raw
+ }
+
+ /// Unwrap raw memory.
+ pub fn into_raw(self) -> B::Memory {
+ self.relevant.dispose();
+ self.raw
+ }
+
+ pub(crate) fn non_coherent_atom_size(&self) -> u64 {
+ debug_assert!(
+ self.host_visible() && !self.host_coherent(),
+ "Irrelevent and shouldn't be called",
+ );
+ self.non_coherent_atom_size
+ }
+
+ /// Create memory from raw object.
+ ///
+ /// # Safety
+ ///
+ /// TODO:
+ pub unsafe fn from_raw(
+ raw: B::Memory,
+ size: u64,
+ properties: gfx_hal::memory::Properties,
+ non_coherent_atom_size: u64,
+ ) -> Self {
+ Memory {
+ properties,
+ raw,
+ size,
+ non_coherent_atom_size,
+ relevant: relevant::Relevant,
+ }
+ }
+
+ /// Check if this memory is host-visible and can be mapped.
+ /// `memory.host_visible()` is equivalent to `memory.properties().contains(Properties::CPU_VISIBLE)`
+ pub fn host_visible(&self) -> bool {
+ self.properties
+ .contains(gfx_hal::memory::Properties::CPU_VISIBLE)
+ }
+
+ /// Check if this memory is host-coherent and doesn't require invalidating or flushing.
+ /// `memory.host_coherent()` is equivalent to `memory.properties().contains(Properties::COHERENT)`
+ pub fn host_coherent(&self) -> bool {
+ self.properties
+ .contains(gfx_hal::memory::Properties::COHERENT)
+ }
+}
+
+// pub(crate) fn memory_ptr_fmt(
+// memory: &*const Memory,
+// fmt: &mut fmt::Formatter<'_>,
+// ) -> Result<(), fmt::Error> {
+// unsafe {
+// if fmt.alternate() {
+// write!(fmt, "*const {:#?}", **memory)
+// } else {
+// write!(fmt, "*const {:?}", **memory)
+// }
+// }
+// }
diff --git a/rendy-memory/src/usage.rs b/rendy-memory/src/usage.rs
new file mode 100644
index 0000000..a9a4012
--- /dev/null
+++ b/rendy-memory/src/usage.rs
@@ -0,0 +1,210 @@
+//! Defines usage types for memory blocks.
+//! See `MemoryUsage` and implementations for details.
+
+use crate::allocator::Kind;
+
+/// Memory usage trait.
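+///
+/// Implementations rank candidates: `memory_fitness` scores a property set
+/// (higher is better) and `allocator_fitness` scores an allocator `Kind`,
+/// where 0 means the allocator should not be used at all.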
+pub trait MemoryUsage: std::fmt::Debug {
+ /// Get set of properties required for the usage.
+ fn properties_required(&self) -> gfx_hal::memory::Properties;
+
+ /// Get comparable fitness value for memory properties.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if properties set doesn't contain required properties.
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32;
+
+ /// Get comparable fitness value for memory allocator.
+ fn allocator_fitness(&self, kind: Kind) -> u32;
+}
+
+impl<T> MemoryUsage for T
+where
+ T: std::ops::Deref + std::fmt::Debug,
+ T::Target: MemoryUsage,
+{
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ (&**self).properties_required()
+ }
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ (&**self).memory_fitness(properties)
+ }
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ (&**self).allocator_fitness(kind)
+ }
+}
+
+/// Full speed GPU access.
+/// Optimal for render targets and persistent resources.
+/// Avoid memory with host access.
+#[derive(Clone, Copy, Debug)]
+pub struct Data;
+
+impl MemoryUsage for Data {
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::DEVICE_LOCAL
+ }
+
+ #[inline]
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ assert!(properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL));
+ 0 | ((!properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)) as u32) << 3
+ | ((!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)) as u32) << 2
+ | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 1
+ | ((!properties.contains(gfx_hal::memory::Properties::COHERENT)) as u32) << 0
+ }
+
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ match kind {
+ Kind::Dedicated => 1,
+ Kind::Dynamic => 2,
+ Kind::Linear => 0,
+ }
+ }
+}
+
+/// CPU to GPU data flow with update commands.
+/// Used for dynamic buffer data, typically constant buffers.
+/// Host access is guaranteed.
+/// Prefers memory with fast GPU access.
+#[derive(Clone, Copy, Debug)]
+pub struct Dynamic;
+
+impl MemoryUsage for Dynamic {
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::CPU_VISIBLE
+ }
+
+ #[inline]
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE));
+ assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED));
+
+ 0 | (properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL) as u32) << 2
+ | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1
+ | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0
+ }
+
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ match kind {
+ Kind::Dedicated => 1,
+ Kind::Dynamic => 2,
+ Kind::Linear => 0,
+ }
+ }
+}
+
+/// CPU to GPU data flow with mapping.
+/// Used for staging data before copying to the `Data` memory.
+/// Host access is guaranteed.
+#[derive(Clone, Copy, Debug)]
+pub struct Upload;
+
+impl MemoryUsage for Upload {
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::CPU_VISIBLE
+ }
+
+ #[inline]
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE));
+ assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED));
+
+ 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2
+ | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1
+ | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0
+ }
+
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ match kind {
+ Kind::Dedicated => 0,
+ Kind::Dynamic => 1,
+ Kind::Linear => 2,
+ }
+ }
+}
+
+/// GPU to CPU data flow with mapping.
+/// Used for copying data from `Data` memory to be read by the host.
+/// Host access is guaranteed.
+#[derive(Clone, Copy, Debug)]
+pub struct Download;
+
+impl MemoryUsage for Download {
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ gfx_hal::memory::Properties::CPU_VISIBLE
+ }
+
+ #[inline]
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE));
+ assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED));
+
+ 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2
+ | (properties.contains(gfx_hal::memory::Properties::CPU_CACHED) as u32) << 1
+ | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 0
+ }
+
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ match kind {
+ Kind::Dedicated => 0,
+ Kind::Dynamic => 1,
+ Kind::Linear => 2,
+ }
+ }
+}
+
+/// Well-known memory usage types.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum MemoryUsageValue {
+ /// See [`Data`]
+ ///
+ /// [`Data`]: struct.Data.html
+ Data,
+
+ /// See [`Dynamic`]
+ ///
+ /// [`Dynamic`]: struct.Dynamic.html
+ Dynamic,
+
+ /// See [`Upload`]
+ ///
+ /// [`Upload`]: struct.Upload.html
+ Upload,
+
+ /// See [`Download`]
+ ///
+ /// [`Download`]: struct.Download.html
+ Download,
+}
+
+/// Dispatches to the corresponding usage type's implementation.
+impl MemoryUsage for MemoryUsageValue {
+ fn properties_required(&self) -> gfx_hal::memory::Properties {
+ match self {
+ MemoryUsageValue::Data => Data.properties_required(),
+ MemoryUsageValue::Dynamic => Dynamic.properties_required(),
+ MemoryUsageValue::Upload => Upload.properties_required(),
+ MemoryUsageValue::Download => Download.properties_required(),
+ }
+ }
+
+ fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 {
+ match self {
+ MemoryUsageValue::Data => Data.memory_fitness(properties),
+ MemoryUsageValue::Dynamic => Dynamic.memory_fitness(properties),
+ MemoryUsageValue::Upload => Upload.memory_fitness(properties),
+ MemoryUsageValue::Download => Download.memory_fitness(properties),
+ }
+ }
+
+ fn allocator_fitness(&self, kind: Kind) -> u32 {
+ match self {
+ MemoryUsageValue::Data => Data.allocator_fitness(kind),
+ MemoryUsageValue::Dynamic => Dynamic.allocator_fitness(kind),
+ MemoryUsageValue::Upload => Upload.allocator_fitness(kind),
+ MemoryUsageValue::Download => Download.allocator_fitness(kind),
+ }
+ }
+}
diff --git a/rendy-memory/src/util.rs b/rendy-memory/src/util.rs
new file mode 100644
index 0000000..07b9b7c
--- /dev/null
+++ b/rendy-memory/src/util.rs
@@ -0,0 +1,157 @@
+pub(crate) fn aligned(value: u64, align: u64) -> u64 {
+ debug_assert_ne!(align, 0);
+ debug_assert_eq!(align.count_ones(), 1);
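+    // For power-of-two `align`, `(value - 1) | (align - 1)` sets all the low
+    // bits, and adding one rounds up: e.g. aligned(13, 8) == 16.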
+ if value == 0 {
+ 0
+ } else {
+ 1u64 + ((value - 1u64) | (align - 1u64))
+ }
+}
+
+pub(crate) trait IntegerFitting {
+ fn fits_usize(self) -> bool;
+ fn fits_isize(self) -> bool;
+
+ fn usize_fits(value: usize) -> bool;
+ fn isize_fits(value: isize) -> bool;
+}
+
+#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
+impl IntegerFitting for u64 {
+ fn fits_usize(self) -> bool {
+ self <= usize::max_value() as u64
+ }
+ fn fits_isize(self) -> bool {
+ self <= isize::max_value() as u64
+ }
+ fn usize_fits(_value: usize) -> bool {
+ true
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl IntegerFitting for u64 {
+ fn fits_usize(self) -> bool {
+ true
+ }
+ fn fits_isize(self) -> bool {
+ self <= isize::max_value() as u64
+ }
+ fn usize_fits(_value: usize) -> bool {
+ true
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0
+ }
+}
+
+#[cfg(not(any(
+ target_pointer_width = "16",
+ target_pointer_width = "32",
+ target_pointer_width = "64"
+)))]
+impl IntegerFitting for u64 {
+ fn fits_usize(self) -> bool {
+ true
+ }
+ fn fits_isize(self) -> bool {
+ true
+ }
+ fn usize_fits(value: usize) -> bool {
+ value <= u64::max_value() as usize
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0 && value <= u64::max_value() as isize
+ }
+}
+
+#[cfg(target_pointer_width = "16")]
+impl IntegerFitting for u32 {
+ fn fits_usize(self) -> bool {
+ self <= usize::max_value() as u32
+ }
+ fn fits_isize(self) -> bool {
+ self <= isize::max_value() as u32
+ }
+ fn usize_fits(_value: usize) -> bool {
+ true
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0
+ }
+}
+
+#[cfg(target_pointer_width = "32")]
+impl IntegerFitting for u32 {
+ fn fits_usize(self) -> bool {
+ true
+ }
+ fn fits_isize(self) -> bool {
+ self <= isize::max_value() as u32
+ }
+ fn usize_fits(_value: usize) -> bool {
+ true
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0
+ }
+}
+
+#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
+impl IntegerFitting for u32 {
+ fn fits_usize(self) -> bool {
+ true
+ }
+ fn fits_isize(self) -> bool {
+ true
+ }
+ fn usize_fits(value: usize) -> bool {
+ value <= u32::max_value() as usize
+ }
+ fn isize_fits(value: isize) -> bool {
+ value >= 0 && value <= u32::max_value() as isize
+ }
+}
+
+pub(crate) fn fits_usize<T: IntegerFitting>(value: T) -> bool {
+ value.fits_usize()
+}
+
+pub(crate) fn fits_u32(value: usize) -> bool {
+ u32::usize_fits(value)
+}
+
+pub(crate) fn align_range(range: std::ops::Range<u64>, align: u64) -> std::ops::Range<u64> {
+ let start = range.start - range.start % align;
+ let end = ((range.end - 1) / align + 1) * align;
+ start..end
+}
+
+pub(crate) fn align_size(size: u64, align: u64) -> u64 {
+ ((size - 1) / align + 1) * align
+}
+
+pub(crate) fn is_non_coherent_visible(properties: gfx_hal::memory::Properties) -> bool {
+ properties & (gfx_hal::memory::Properties::CPU_VISIBLE | gfx_hal::memory::Properties::COHERENT)
+ == gfx_hal::memory::Properties::CPU_VISIBLE
+}
+
+pub(crate) fn relative_to_sub_range(
+ range: std::ops::Range<u64>,
+ relative: std::ops::Range<u64>,
+) -> Option<std::ops::Range<u64>> {
+ let start = relative.start + range.start;
+ let end = relative.end + range.start;
+ if end <= range.end {
+ Some(start..end)
+ } else {
+ None
+ }
+}
+
+pub(crate) fn is_sub_range(range: std::ops::Range<u64>, sub: std::ops::Range<u64>) -> bool {
+ sub.start >= range.start && sub.end <= range.end
+}
diff --git a/rendy-memory/src/utilization.rs b/rendy-memory/src/utilization.rs
new file mode 100644
index 0000000..723c429
--- /dev/null
+++ b/rendy-memory/src/utilization.rs
@@ -0,0 +1,137 @@
+use {
+ colorful::{core::color_string::CString, Color, Colorful as _},
+ gfx_hal::memory::Properties,
+};
+
+/// Memory utilization stats.
+#[derive(Clone, Copy, Debug)]
+pub struct MemoryUtilization {
+ /// Total number of bytes allocated.
+ pub used: u64,
+    /// Effective number of bytes allocated.
+ pub effective: u64,
+}
+
+/// Memory utilization of one heap.
+#[derive(Clone, Copy, Debug)]
+pub struct MemoryHeapUtilization {
+ /// Utilization.
+ pub utilization: MemoryUtilization,
+
+ /// Memory heap size.
+ pub size: u64,
+}
+
+/// Memory utilization of one type.
+#[derive(Clone, Copy, Debug)]
+pub struct MemoryTypeUtilization {
+ /// Utilization.
+ pub utilization: MemoryUtilization,
+
+ /// Memory type info.
+ pub properties: Properties,
+
+ /// Index of heap this memory type uses.
+ pub heap_index: usize,
+}
+
+/// Total memory utilization.
+#[derive(Clone, Debug)]
+pub struct TotalMemoryUtilization {
+ /// Utilization by types.
+ pub types: Vec<MemoryTypeUtilization>,
+
+ /// Utilization by heaps.
+ pub heaps: Vec<MemoryHeapUtilization>,
+}
+
+impl std::fmt::Display for TotalMemoryUtilization {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ const MB: u64 = 1024 * 1024;
+
+ writeln!(fmt, "!!! Memory utilization !!!")?;
+ for (index, heap) in self.heaps.iter().enumerate() {
+ let size = heap.size;
+ let MemoryUtilization { used, effective } = heap.utilization;
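+            // Basis points are hundredths of a percent: 10000 == 100%.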
+ let usage_basis_points = used * 10000 / size;
+ let fill = if usage_basis_points > 10000 {
+ // Shouldn't happen, but just in case.
+ 50
+ } else {
+ (usage_basis_points / 200) as usize
+ };
+ let effective_basis_points = if used > 0 {
+ effective * 10000 / used
+ } else {
+ 10000
+ };
+
+ let line = ("|".repeat(fill) + &(" ".repeat(50 - fill)))
+ .gradient_with_color(Color::Green, Color::Red);
+ writeln!(
+ fmt,
+ "Heap {}:\n{:6} / {:<6} or{} {{ effective:{} }} [{}]",
+ format!("{}", index).magenta(),
+ format!("{}MB", used / MB),
+ format!("{}MB", size / MB),
+ format_basis_points(usage_basis_points),
+ format_basis_points_inverted(effective_basis_points),
+ line
+ )?;
+
+ for ty in self.types.iter().filter(|ty| ty.heap_index == index) {
+ let properties = ty.properties;
+ let MemoryUtilization { used, effective } = ty.utilization;
+ let usage_basis_points = used * 10000 / size;
+ let effective_basis_points = if used > 0 {
+ effective * 10000 / used
+ } else {
+ 0
+ };
+
+ writeln!(
+ fmt,
+ " {:>6} or{} {{ effective:{} }} | {:?}",
+ format!("{}MB", used / MB),
+ format_basis_points(usage_basis_points),
+ format_basis_points_inverted(effective_basis_points),
+ properties,
+ )?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+fn format_basis_points(basis_points: u64) -> CString {
+ debug_assert!(basis_points <= 10000);
+ let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100);
+ if basis_points > 7500 {
+ s.red()
+ } else if basis_points > 5000 {
+ s.yellow()
+ } else if basis_points > 2500 {
+ s.green()
+ } else if basis_points > 100 {
+ s.blue()
+ } else {
+ s.white()
+ }
+}
+
+fn format_basis_points_inverted(basis_points: u64) -> CString {
+ debug_assert!(basis_points <= 10000);
+ let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100);
+ if basis_points > 9900 {
+ s.white()
+ } else if basis_points > 7500 {
+ s.blue()
+ } else if basis_points > 5000 {
+ s.green()
+ } else if basis_points > 2500 {
+ s.yellow()
+ } else {
+ s.red()
+ }
+}
diff --git a/stockton-render/Cargo.toml b/stockton-render/Cargo.toml
index 0348c60..ecb08f3 100644
--- a/stockton-render/Cargo.toml
+++ b/stockton-render/Cargo.toml
@@ -9,7 +9,7 @@ stockton-input = { path = "../stockton-input" }
stockton-levels = { path = "../stockton-levels" }
stockton-types = { path = "../stockton-types" }
winit = "^0.21"
-gfx-hal = "0.4.1"
+gfx-hal = "^0.8.0"
arrayvec = "0.4.10"
nalgebra-glm = "^0.6"
shaderc = "^0.7"
@@ -17,8 +17,8 @@ log = "0.4.0"
image = "0.23.11"
legion = { version = "^0.3" }
egui = "^0.2"
-rendy-memory = "0.5.2"
-rendy-descriptor = "0.5.1"
+rendy-memory = { path = "../rendy-memory" }
+rendy-descriptor = { path = "../rendy-descriptor" }
anyhow = "1.0.40"
thiserror = "1.0.25"
@@ -27,6 +27,5 @@ default = ["vulkan"]
vulkan = ["gfx-backend-vulkan"]
[dependencies.gfx-backend-vulkan]
-version = "0.4.1"
-features = ["x11"]
+version = "^0.8.0"
optional = true
diff --git a/stockton-render/src/draw/buffer.rs b/stockton-render/src/draw/buffer.rs
index 34400a6..227bb12 100644
--- a/stockton-render/src/draw/buffer.rs
+++ b/stockton-render/src/draw/buffer.rs
@@ -1,10 +1,13 @@
use core::mem::{size_of, ManuallyDrop};
use std::convert::TryInto;
-use std::iter::once;
+use std::iter::{empty, once};
use std::ops::{Index, IndexMut};
-use hal::prelude::*;
-use hal::{buffer::Usage, memory::Properties, queue::Submission, MemoryTypeId};
+use hal::{
+ buffer::Usage,
+ memory::{Properties, Segment, SparseFlags},
+ MemoryTypeId,
+};
use crate::error::CreationError;
use crate::types::*;
@@ -12,14 +15,14 @@ use crate::types::*;
/// Create a buffer of the given specifications, allocating more device memory.
// TODO: Use a different memory allocator?
pub(crate) fn create_buffer(
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
usage: Usage,
properties: Properties,
size: u64,
-) -> Result<(Buffer, Memory), CreationError> {
- let mut buffer =
- unsafe { device.create_buffer(size, usage) }.map_err(CreationError::BufferError)?;
+) -> Result<(BufferT, MemoryT), CreationError> {
+ let mut buffer = unsafe { device.create_buffer(size, usage, SparseFlags::empty()) }
+ .map_err(CreationError::BufferError)?;
let requirements = unsafe { device.get_buffer_requirements(&buffer) };
let memory_type_id = adapter
@@ -46,30 +49,30 @@ pub(crate) fn create_buffer(
/// A buffer that can be modified by the CPU
pub trait ModifiableBuffer: IndexMut<usize> {
/// Get a handle to the underlying GPU buffer
- fn get_buffer(&mut self) -> &Buffer;
+ fn get_buffer(&mut self) -> &BufferT;
/// Commit all changes to GPU memory, returning a handle to the GPU buffer
fn commit<'a>(
&'a mut self,
- device: &Device,
- command_queue: &mut CommandQueue,
- command_pool: &mut CommandPool,
- ) -> &'a Buffer;
+ device: &DeviceT,
+ command_queue: &mut QueueT,
+ command_pool: &mut CommandPoolT,
+ ) -> &'a BufferT;
}
/// A GPU buffer that is written to using a staging buffer
pub struct StagedBuffer<'a, T: Sized> {
/// CPU-visible buffer
- staged_buffer: ManuallyDrop<Buffer>,
+ staged_buffer: ManuallyDrop<BufferT>,
/// CPU-visible memory
- staged_memory: ManuallyDrop<Memory>,
+ staged_memory: ManuallyDrop<MemoryT>,
/// GPU Buffer
- buffer: ManuallyDrop<Buffer>,
+ buffer: ManuallyDrop<BufferT>,
/// GPU Memory
- memory: ManuallyDrop<Memory>,
+ memory: ManuallyDrop<MemoryT>,
/// Where staged buffer is mapped in CPU memory
staged_mapped_memory: &'a mut [T],
@@ -84,7 +87,7 @@ pub struct StagedBuffer<'a, T: Sized> {
impl<'a, T: Sized> StagedBuffer<'a, T> {
    /// `size` is the number of `T` elements, not bytes
pub fn new(
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
usage: Usage,
size: u64,
@@ -93,7 +96,7 @@ impl<'a, T: Sized> StagedBuffer<'a, T> {
let size_bytes = size * size_of::<T>() as u64;
// Get CPU-visible buffer
- let (staged_buffer, staged_memory) = create_buffer(
+ let (staged_buffer, mut staged_memory) = create_buffer(
device,
adapter,
Usage::TRANSFER_SRC,
@@ -112,7 +115,15 @@ impl<'a, T: Sized> StagedBuffer<'a, T> {
// Map it somewhere and get a slice to that memory
let staged_mapped_memory = unsafe {
- let ptr = device.map_memory(&staged_memory, 0..size_bytes).unwrap(); // TODO
+ let ptr = device
+ .map_memory(
+ &mut staged_memory,
+ Segment {
+ offset: 0,
+ size: Some(size_bytes),
+ },
+ )
+ .unwrap(); // TODO
std::slice::from_raw_parts_mut(ptr as *mut T, size.try_into().unwrap())
};
@@ -129,9 +140,9 @@ impl<'a, T: Sized> StagedBuffer<'a, T> {
}
/// Call this before dropping
- pub(crate) fn deactivate(mut self, device: &mut Device) {
+ pub(crate) fn deactivate(mut self, device: &mut DeviceT) {
unsafe {
- device.unmap_memory(&self.staged_memory);
+ device.unmap_memory(&mut self.staged_memory);
device.free_memory(ManuallyDrop::take(&mut self.staged_memory));
device.destroy_buffer(ManuallyDrop::take(&mut self.staged_buffer));
@@ -143,16 +154,16 @@ impl<'a, T: Sized> StagedBuffer<'a, T> {
}
impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
- fn get_buffer(&mut self) -> &Buffer {
+ fn get_buffer(&mut self) -> &BufferT {
&self.buffer
}
fn commit<'b>(
&'b mut self,
- device: &Device,
- command_queue: &mut CommandQueue,
- command_pool: &mut CommandPool,
- ) -> &'b Buffer {
+ device: &DeviceT,
+ command_queue: &mut QueueT,
+ command_pool: &mut CommandPoolT,
+ ) -> &'b BufferT {
// Only commit if there's changes to commit.
if self.staged_is_dirty {
// Copy from staged to buffer
@@ -166,11 +177,11 @@ impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
buf.copy_buffer(
&self.staged_buffer,
&self.buffer,
- &[BufferCopy {
+ std::iter::once(BufferCopy {
src: 0,
dst: 0,
size: ((self.highest_used + 1) * size_of::<T>()) as u64,
- }],
+ }),
);
buf.finish();
@@ -181,15 +192,14 @@ impl<'a, T: Sized> ModifiableBuffer for StagedBuffer<'a, T> {
// TODO: We could use more semaphores or something?
// TODO: Better error handling
unsafe {
- let copy_finished = device.create_fence(false).unwrap();
- command_queue.submit::<_, _, Semaphore, _, _>(
- Submission {
- command_buffers: &[&buf],
- wait_semaphores: std::iter::empty::<_>(),
- signal_semaphores: std::iter::empty::<_>(),
- },
- Some(&copy_finished),
- );
+ let mut copy_finished = device.create_fence(false).unwrap();
+ command_queue
+ .submit::<std::iter::Once<_>, std::iter::Empty<_>, std::iter::Empty<_>>(
+ once(&buf),
+ empty::<(&SemaphoreT, hal::pso::PipelineStage)>(),
+ empty::<&SemaphoreT>(),
+ Some(&mut copy_finished),
+ );
device
.wait_for_fence(&copy_finished, core::u64::MAX)
diff --git a/stockton-render/src/draw/context.rs b/stockton-render/src/draw/context.rs
index 87fb0a2..21a69fa 100644
--- a/stockton-render/src/draw/context.rs
+++ b/stockton-render/src/draw/context.rs
@@ -3,12 +3,13 @@
//! You'll need something else to actually find/sort the faces though.
use std::{
+ iter::once,
mem::ManuallyDrop,
sync::{Arc, RwLock},
};
use arrayvec::ArrayVec;
-use hal::{pool::CommandPoolCreateFlags, prelude::*};
+use hal::{memory::SparseFlags, pool::CommandPoolCreateFlags};
use log::debug;
use na::Mat4;
use rendy_memory::DynamicConfig;
@@ -18,6 +19,7 @@ use super::{
buffer::ModifiableBuffer,
draw_buffers::{DrawBuffers, UvPoint},
pipeline::CompletePipeline,
+ queue_negotiator::QueueNegotiator,
render::do_render,
target::{SwapchainProperties, TargetChain},
texture::{resolver::BasicFsResolver, TextureRepo},
@@ -41,15 +43,11 @@ pub struct RenderingContext<'a, M: 'static + MinBspFeatures<VulkanSystem>> {
instance: ManuallyDrop<back::Instance>,
/// Device we're using
- device: Arc<RwLock<Device>>,
+ device: Arc<RwLock<DeviceT>>,
/// Adapter we're using
adapter: Adapter,
- // Render destination
- /// Surface to draw to
- surface: ManuallyDrop<Surface>,
-
/// Swapchain and stuff
pub(crate) target_chain: ManuallyDrop<TargetChain>,
@@ -61,10 +59,10 @@ pub struct RenderingContext<'a, M: 'static + MinBspFeatures<VulkanSystem>> {
// Command pool and buffers
/// The command pool used for our buffers
- cmd_pool: ManuallyDrop<CommandPool>,
+ cmd_pool: ManuallyDrop<CommandPoolT>,
- /// The queue group our buffers belong to
- queue_group: QueueGroup,
+ /// The queue to use for drawing
+ queue: Arc<RwLock<QueueT>>,
/// Main Texture repo
tex_repo: ManuallyDrop<TextureRepo<'a>>,
@@ -93,9 +91,7 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
pub fn new(window: &Window, map: M) -> Result<Self, error::CreationError> {
let map = Arc::new(RwLock::new(map));
// Create surface
- let (instance, mut surface, mut adapters) = unsafe {
- use hal::Instance;
-
+ let (instance, surface, mut adapters) = unsafe {
let instance = back::Instance::create("stockton", 1)
.map_err(|_| error::CreationError::WindowError)?;
let surface = instance
@@ -109,31 +105,44 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
// TODO: Properly figure out which adapter to use
let adapter = adapters.remove(0);
+ let mut draw_queue_negotiator = QueueNegotiator::find(&adapter, |family| {
+ surface.supports_queue_family(family) && family.queue_type().supports_graphics()
+ })
+ .unwrap();
+
+ let mut tex_queue_negotiator =
+ QueueNegotiator::find(&adapter, TextureRepo::queue_family_filter).unwrap();
// Device & Queue group
- let (device_lock, queue_group) = {
- let family = adapter
- .queue_families
- .iter()
- .find(|family| {
- surface.supports_queue_family(family) && family.queue_type().supports_graphics()
- })
- .unwrap();
-
- let mut gpu = unsafe {
+ let (device_lock, mut queue_groups) = {
+ debug!(
+ "Using draw queue family {:?}",
+ draw_queue_negotiator.family_id()
+ );
+ debug!(
+ "Using tex queue family {:?}",
+ tex_queue_negotiator.family_id()
+ );
+
+ let gpu = unsafe {
adapter
.physical_device
- .open(&[(family, &[1.0])], hal::Features::empty())
+ .open(
+ &[
+ (draw_queue_negotiator.family(&adapter), &[1.0]),
+ (tex_queue_negotiator.family(&adapter), &[1.0]),
+ ],
+ hal::Features::empty(),
+ )
.unwrap()
};
- (
- Arc::new(RwLock::new(gpu.device)),
- gpu.queue_groups.pop().unwrap(),
- )
+ (Arc::new(RwLock::new(gpu.device)), gpu.queue_groups)
};
let mut device = device_lock.write().unwrap();
+ let device_props = adapter.physical_device.properties();
+
// Figure out what our swapchain will look like
let swapchain_properties = SwapchainProperties::find_best(&adapter, &surface)
.map_err(|_| error::CreationError::BadSurface)?;
@@ -145,7 +154,10 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
// Command pool
let mut cmd_pool = unsafe {
- device.create_command_pool(queue_group.family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
+ device.create_command_pool(
+ draw_queue_negotiator.family_id(),
+ CommandPoolCreateFlags::RESET_INDIVIDUAL,
+ )
}
.map_err(|_| error::CreationError::OutOfMemoryError)?;
@@ -179,6 +191,7 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
Format::Rgba8Srgb,
Tiling::Optimal,
Usage::SAMPLED,
+ SparseFlags::empty(),
ViewCapabilities::empty(),
)
.map_err(|_| error::CreationError::OutOfMemoryError)?;
@@ -198,22 +211,29 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
max_chunk_size: u64::pow(2, 63),
min_device_allocation: 4 * 32 * 32,
},
+ device_props.limits.non_coherent_atom_size as u64,
)
};
drop(device);
// Texture repos
+ debug!("Creating 3D Texture Repo");
let tex_repo = TextureRepo::new(
device_lock.clone(),
+ tex_queue_negotiator.family_id(),
+ tex_queue_negotiator.get_queue(&mut queue_groups).unwrap(),
&adapter,
map.clone(),
BasicFsResolver::new(std::path::Path::new(".")),
)
.unwrap(); // TODO
+ debug!("Creating UI Texture Repo");
let ui_tex_repo = TextureRepo::new(
device_lock.clone(),
+ tex_queue_negotiator.family_id(),
+ tex_queue_negotiator.get_queue(&mut queue_groups).unwrap(),
&adapter,
Arc::new(RwLock::new(UiTextures)),
BasicFsResolver::new(std::path::Path::new(".")),
@@ -224,18 +244,13 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
let ds_layout_lock = tex_repo.get_ds_layout();
let ui_ds_layout_lock = ui_tex_repo.get_ds_layout();
- let mut descriptor_set_layouts: ArrayVec<[_; 2]> = ArrayVec::new();
- descriptor_set_layouts.push(&*ds_layout_lock);
-
- let mut ui_descriptor_set_layouts: ArrayVec<[_; 2]> = ArrayVec::new();
- ui_descriptor_set_layouts.push(&*ui_ds_layout_lock);
// Graphics pipeline
let pipeline = CompletePipeline::new(
&mut device,
swapchain_properties.extent,
&swapchain_properties,
- descriptor_set_layouts,
+ once(&*ds_layout_lock),
)?;
// UI pipeline
@@ -243,19 +258,18 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
&mut device,
swapchain_properties.extent,
&swapchain_properties,
- ui_descriptor_set_layouts,
+ once(&*ui_ds_layout_lock),
)?;
// Swapchain and associated resources
let target_chain = TargetChain::new(
&mut device,
&adapter,
- &mut surface,
+ surface,
&pipeline,
&ui_pipeline,
&mut cmd_pool,
swapchain_properties,
- None,
)
.map_err(error::CreationError::TargetChainCreationError)?;
@@ -266,11 +280,11 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
Ok(RenderingContext {
map,
instance: ManuallyDrop::new(instance),
- surface: ManuallyDrop::new(surface),
device: device_lock,
adapter,
- queue_group,
+
+ queue: draw_queue_negotiator.get_queue(&mut queue_groups).unwrap(),
target_chain: ManuallyDrop::new(target_chain),
cmd_pool: ManuallyDrop::new(cmd_pool),
@@ -300,7 +314,10 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
device.wait_idle().unwrap();
- let properties = SwapchainProperties::find_best(&self.adapter, &self.surface)
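+ // Tear down the old target chain first; this hands the surface back so the
+ // swapchain can be rebuilt on it.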
+ let surface = ManuallyDrop::into_inner(read(&self.target_chain))
+ .deactivate_with_recyling(&mut device, &mut self.cmd_pool);
+
+ let properties = SwapchainProperties::find_best(&self.adapter, &surface)
.map_err(|_| error::CreationError::BadSurface)?;
use core::ptr::read;
@@ -312,14 +329,11 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
ManuallyDrop::into_inner(read(&self.pipeline)).deactivate(&mut device);
self.pipeline = ManuallyDrop::new({
- let mut descriptor_set_layouts: ArrayVec<[_; 2]> = ArrayVec::new();
- descriptor_set_layouts.push(&*ds_layout_handle);
-
CompletePipeline::new(
&mut device,
properties.extent,
&properties,
- descriptor_set_layouts,
+ once(&*ds_layout_handle),
)?
});
@@ -334,32 +348,30 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
&mut device,
properties.extent,
&properties,
- descriptor_set_layouts,
+ once(&*ui_ds_layout_handle),
)?
});
- let old_swapchain = ManuallyDrop::into_inner(read(&self.target_chain))
- .deactivate_with_recyling(&mut device, &mut self.cmd_pool);
self.target_chain = ManuallyDrop::new(
TargetChain::new(
&mut device,
&self.adapter,
- &mut self.surface,
+ surface,
&self.pipeline,
&self.ui_pipeline,
&mut self.cmd_pool,
properties,
- Some(old_swapchain),
)
.map_err(error::CreationError::TargetChainCreationError)?,
);
-
Ok(())
}
/// Draw all vertices in the buffer
pub fn draw_vertices(&mut self, ui: &mut UiState, faces: &[u32]) -> Result<(), &'static str> {
let mut device = self.device.write().unwrap();
+ let mut queue = self.queue.write().unwrap();
+
// Ensure UI texture(s) are loaded
ensure_textures_ui(
&mut self.ui_tex_repo,
@@ -367,7 +379,7 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
&mut device,
&mut self.adapter,
&mut self.texture_allocator,
- &mut self.queue_group.queues[0],
+ &mut queue,
&mut self.cmd_pool,
);
@@ -375,7 +387,7 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
self.tex_repo.process_responses();
// 3D Pass
- let cmd_buffer = self.target_chain.prep_next_target(
+ let (cmd_buffer, img) = self.target_chain.prep_next_target(
&mut device,
&mut self.draw_buffers,
&self.pipeline,
@@ -391,9 +403,9 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
);
// 2D Pass
- let cmd_buffer = self
- .target_chain
- .target_2d_pass(&mut self.ui_draw_buffers, &self.ui_pipeline)?;
+ let cmd_buffer =
+ self.target_chain
+ .target_2d_pass(&mut self.ui_draw_buffers, &img, &self.ui_pipeline)?;
do_render_ui(
cmd_buffer,
&self.ui_pipeline.pipeline_layout,
@@ -403,33 +415,25 @@ impl<'a, M: 'static + MinBspFeatures<VulkanSystem>> RenderingContext<'a, M> {
);
// Update our buffers before we actually start drawing
- self.draw_buffers.vertex_buffer.commit(
- &device,
- &mut self.queue_group.queues[0],
- &mut self.cmd_pool,
- );
+ self.draw_buffers
+ .vertex_buffer
+ .commit(&device, &mut queue, &mut self.cmd_pool);
- self.draw_buffers.index_buffer.commit(
- &device,
- &mut self.queue_group.queues[0],
- &mut self.cmd_pool,
- );
+ self.draw_buffers
+ .index_buffer
+ .commit(&device, &mut queue, &mut self.cmd_pool);
- self.ui_draw_buffers.vertex_buffer.commit(
- &device,
- &mut self.queue_group.queues[0],
- &mut self.cmd_pool,
- );
+ self.ui_draw_buffers
+ .vertex_buffer
+ .commit(&device, &mut queue, &mut self.cmd_pool);
- self.ui_draw_buffers.index_buffer.commit(
- &device,
- &mut self.queue_group.queues[0],
- &mut self.cmd_pool,
- );
+ self.ui_draw_buffers
+ .index_buffer
+ .commit(&device, &mut queue, &mut self.cmd_pool);
// Send commands off to GPU
self.target_chain
- .finish_and_submit_target(&mut self.queue_group.queues[0])?;
+ .finish_and_submit_target(img, &mut queue)?;
Ok(())
}
@@ -454,16 +458,16 @@ impl<'a, M: MinBspFeatures<VulkanSystem>> core::ops::Drop for RenderingContext<'
ManuallyDrop::into_inner(read(&self.texture_allocator)).dispose();
- ManuallyDrop::into_inner(read(&self.target_chain))
- .deactivate(&mut device, &mut self.cmd_pool);
+ ManuallyDrop::into_inner(read(&self.target_chain)).deactivate(
+ &mut self.instance,
+ &mut device,
+ &mut self.cmd_pool,
+ );
device.destroy_command_pool(ManuallyDrop::into_inner(read(&self.cmd_pool)));
ManuallyDrop::into_inner(read(&self.pipeline)).deactivate(&mut device);
ManuallyDrop::into_inner(read(&self.ui_pipeline)).deactivate(&mut device);
-
- self.instance
- .destroy_surface(ManuallyDrop::into_inner(read(&self.surface)));
}
}
}
diff --git a/stockton-render/src/draw/depth_buffer.rs b/stockton-render/src/draw/depth_buffer.rs
index 14b4d30..8af1514 100644
--- a/stockton-render/src/draw/depth_buffer.rs
+++ b/stockton-render/src/draw/depth_buffer.rs
@@ -1,15 +1,15 @@
use crate::draw::buffer::create_buffer;
-use gfx_hal::{format::Aspects, memory::Properties, queue::Submission, MemoryTypeId};
+use gfx_hal::{format::Aspects, memory::Properties, MemoryTypeId};
use hal::{
buffer::Usage as BufUsage,
format::{Format, Swizzle},
- image::{SubresourceRange, Usage, ViewKind},
+ image::{SubresourceRange, Usage, Usage as ImgUsage, ViewKind},
memory,
+ memory::Segment,
};
-use std::convert::TryInto;
+use std::{array::IntoIter, convert::TryInto, iter::empty};
use crate::types::*;
-use hal::prelude::*;
use std::mem::ManuallyDrop;
use super::texture::{LoadableImage, PIXEL_SIZE};
@@ -17,18 +17,18 @@ use super::texture::{LoadableImage, PIXEL_SIZE};
/// Holds an image that's loaded into GPU memory dedicated only to that image, bypassing the memory allocator.
pub struct DedicatedLoadedImage {
/// The GPU Image handle
- image: ManuallyDrop<Image>,
+ image: ManuallyDrop<ImageT>,
/// The full view of the image
- pub image_view: ManuallyDrop<ImageView>,
+ pub image_view: ManuallyDrop<ImageViewT>,
/// The memory backing the image
- memory: ManuallyDrop<Memory>,
+ memory: ManuallyDrop<MemoryT>,
}
impl DedicatedLoadedImage {
pub fn new(
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
format: Format,
usage: Usage,
@@ -39,7 +39,7 @@ impl DedicatedLoadedImage {
let (memory, image_ref) = {
// Round up the size to align properly
let initial_row_size = PIXEL_SIZE * width;
- let limits = adapter.physical_device.limits();
+ let limits = adapter.physical_device.properties().limits;
let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1;
let row_size =
@@ -56,6 +56,7 @@ impl DedicatedLoadedImage {
format,
Tiling::Optimal,
usage,
+ memory::SparseFlags::empty(),
ViewCapabilities::empty(),
)
}
@@ -96,7 +97,14 @@ impl DedicatedLoadedImage {
// Create ImageView and sampler
let image_view = unsafe {
- device.create_image_view(&image_ref, ViewKind::D2, format, Swizzle::NO, resources)
+ device.create_image_view(
+ &image_ref,
+ ViewKind::D2,
+ format,
+ Swizzle::NO,
+ ImgUsage::DEPTH_STENCIL_ATTACHMENT,
+ resources,
+ )
}
.map_err(|_| "Couldn't create the image view!")?;
@@ -111,13 +119,13 @@ impl DedicatedLoadedImage {
pub fn load<T: LoadableImage>(
&mut self,
img: T,
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
- command_queue: &mut CommandQueue,
- command_pool: &mut CommandPool,
+ command_queue: &mut QueueT,
+ command_pool: &mut CommandPoolT,
) -> Result<(), &'static str> {
let initial_row_size = PIXEL_SIZE * img.width() as usize;
- let limits = adapter.physical_device.limits();
+ let limits = adapter.physical_device.properties().limits;
let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1;
let row_size =
@@ -126,7 +134,7 @@ impl DedicatedLoadedImage {
debug_assert!(row_size as usize >= initial_row_size);
// Make a staging buffer
- let (staging_buffer, staging_memory) = create_buffer(
+ let (staging_buffer, mut staging_memory) = create_buffer(
device,
adapter,
BufUsage::TRANSFER_SRC,
@@ -139,7 +147,13 @@ impl DedicatedLoadedImage {
unsafe {
let mapped_memory: *mut u8 = std::mem::transmute(
device
- .map_memory(&staging_memory, 0..total_size)
+ .map_memory(
+ &mut staging_memory,
+ Segment {
+ offset: 0,
+ size: None,
+ },
+ )
.map_err(|_| "Couldn't map buffer memory")?,
);
@@ -148,7 +162,7 @@ impl DedicatedLoadedImage {
img.copy_row(y as u32, mapped_memory.offset(dest_base));
}
- device.unmap_memory(&staging_memory);
+ device.unmap_memory(&mut staging_memory);
}
// Copy from staging to image memory
@@ -170,14 +184,16 @@ impl DedicatedLoadedImage {
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
},
};
buf.pipeline_barrier(
PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
memory::Dependencies::empty(),
- &[image_barrier],
+ IntoIter::new([image_barrier]),
);
// Copy from buffer to image
@@ -185,7 +201,7 @@ impl DedicatedLoadedImage {
&staging_buffer,
&(*self.image),
Layout::TransferDstOptimal,
- &[BufferImageCopy {
+ IntoIter::new([BufferImageCopy {
buffer_offset: 0,
buffer_width: (row_size / PIXEL_SIZE) as u32,
buffer_height: img.height(),
@@ -200,7 +216,7 @@ impl DedicatedLoadedImage {
height: img.height(),
depth: 1,
},
- }],
+ }]),
);
// Setup the layout of our image for shaders
@@ -211,15 +227,17 @@ impl DedicatedLoadedImage {
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
},
};
buf.pipeline_barrier(
PipelineStage::TRANSFER..PipelineStage::FRAGMENT_SHADER,
memory::Dependencies::empty(),
- &[image_barrier],
+ IntoIter::new([image_barrier]),
);
buf.finish();
@@ -229,14 +247,12 @@ impl DedicatedLoadedImage {
// Submit our commands and wait for them to finish
unsafe {
- let setup_finished = device.create_fence(false).unwrap();
- command_queue.submit::<_, _, Semaphore, _, _>(
- Submission {
- command_buffers: &[&buf],
- wait_semaphores: std::iter::empty::<_>(),
- signal_semaphores: std::iter::empty::<_>(),
- },
- Some(&setup_finished),
+ let mut setup_finished = device.create_fence(false).unwrap();
+ command_queue.submit(
+ IntoIter::new([&buf]),
+ empty(),
+ empty(),
+ Some(&mut setup_finished),
);
device
@@ -259,10 +275,10 @@ impl DedicatedLoadedImage {
/// Load the given image into a new buffer
pub fn load_into_new<T: LoadableImage>(
img: T,
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
- command_queue: &mut CommandQueue,
- command_pool: &mut CommandPool,
+ command_queue: &mut QueueT,
+ command_pool: &mut CommandPoolT,
format: Format,
usage: Usage,
) -> Result<DedicatedLoadedImage, &'static str> {
@@ -273,8 +289,10 @@ impl DedicatedLoadedImage {
usage | Usage::TRANSFER_DST,
SubresourceRange {
aspects: Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
},
img.width() as usize,
img.height() as usize,
@@ -286,7 +304,7 @@ impl DedicatedLoadedImage {
/// Properly frees/destroys all the objects in this struct
/// Dropping without doing this is a bad idea
- pub fn deactivate(self, device: &mut Device) {
+ pub fn deactivate(self, device: &mut DeviceT) {
unsafe {
use core::ptr::read;
diff --git a/stockton-render/src/draw/draw_buffers.rs b/stockton-render/src/draw/draw_buffers.rs
index bba69df..67687dd 100644
--- a/stockton-render/src/draw/draw_buffers.rs
+++ b/stockton-render/src/draw/draw_buffers.rs
@@ -21,7 +21,7 @@ pub struct DrawBuffers<'a, T: Sized> {
impl<'a, T> DrawBuffers<'a, T> {
pub fn new(
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
) -> Result<DrawBuffers<'a, T>, CreationError> {
let vert = StagedBuffer::new(device, &adapter, Usage::VERTEX, INITIAL_VERT_SIZE)?;
@@ -33,7 +33,7 @@ impl<'a, T> DrawBuffers<'a, T> {
})
}
- pub fn deactivate(self, device: &mut Device) {
+ pub fn deactivate(self, device: &mut DeviceT) {
unsafe {
use core::ptr::read;
diff --git a/stockton-render/src/draw/macros.rs b/stockton-render/src/draw/macros.rs
deleted file mode 100644
index 8558b71..0000000
--- a/stockton-render/src/draw/macros.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-//! Helper macros, mostly for the graphics pipeline definitions
-
-/// Macro for easily defining buffer attribute descriptions
-/// Usage:
-/// ```
-/// // 0 is the binding value
-/// let attributes: Vec<AttributeDesc> = pipeline_vb_attributes!(0,
-/// size_of::<f32>() * 3; Rgb32Sfloat
-/// size_of::<f32>() * 2; Rg32Sfloat,
-/// size_of::<u32>(); R32Sint
-/// );
-/// ```
-/// See the hal::pso::Format enum for possible types
-#[allow(clippy::vec_init_then_push)]
-macro_rules! pipeline_vb_attributes {
- // Special case for single item
- ( $binding:expr, $firstSize:expr; $firstType:ident ) => ({
- #![allow(clippy::vec_init_then_push)]
- vec![
- AttributeDesc {
- location: 0,
- binding: $binding,
- element: Element {
- format: Format::$firstType,
- offset: $firstSize as u32
- }
- }
- ]
- });
-
- // Start of recursion
- ( $binding:expr,
- $firstSize:expr; $firstType:ident,
- $( $size:expr; $type:ident ),*
- ) => ({
- use hal::pso::{AttributeDesc, Element};
-
- let mut vec = Vec::new();
-
- vec.push(AttributeDesc {
- location: 0,
- binding: $binding,
- element: Element {
- format: Format::$firstType,
- offset: 0
- }
- });
-
- pipeline_vb_attributes!(
- vec; $binding; 1; $firstSize,
- $($size; $type),*
- );
-
- vec
- });
-
- // Middle of recursion
- ( $vec:ident; $binding:expr; $location:expr; $prevSize:expr,
- $firstSize:expr; $firstType:ident,
- $($size:expr; $type:ident),* ) => ({
-
- $vec.push(AttributeDesc {
- location: $location,
- binding: $binding,
- element: Element {
- format: Format::$firstType,
- offset: $prevSize as u32
- }
- });
-
- pipeline_vb_attributes!(
- $vec; $binding; ($location + 1); ($prevSize + $firstSize),
- $($size; $type),*
- );
- });
-
- // End of recursion
- ( $vec:ident; $binding:expr; $location:expr; $prevSize:expr,
- $firstSize:expr; $firstType:ident ) => ({
- $vec.push(AttributeDesc {
- location: $location,
- binding: $binding,
- element: Element {
- format: Format::$firstType,
- offset: $prevSize as u32
- }
- });
- });
-}
diff --git a/stockton-render/src/draw/mod.rs b/stockton-render/src/draw/mod.rs
index c6ee90b..e802ed5 100644
--- a/stockton-render/src/draw/mod.rs
+++ b/stockton-render/src/draw/mod.rs
@@ -2,14 +2,13 @@
pub mod target;
-#[macro_use]
-mod macros;
mod buffer;
mod camera;
mod context;
mod depth_buffer;
mod draw_buffers;
mod pipeline;
+mod queue_negotiator;
mod render;
mod texture;
mod ui;
diff --git a/stockton-render/src/draw/pipeline.rs b/stockton-render/src/draw/pipeline.rs
index 5ef636a..0a02947 100644
--- a/stockton-render/src/draw/pipeline.rs
+++ b/stockton-render/src/draw/pipeline.rs
@@ -10,12 +10,11 @@ const VERTEX_SOURCE: &str = include_str!("./data/stockton.vert");
const FRAGMENT_SOURCE: &str = include_str!("./data/stockton.frag");
use std::{
- borrow::Borrow,
+ array::IntoIter,
+ iter::{empty, once},
mem::{size_of, ManuallyDrop},
};
-use hal::prelude::*;
-
use super::target::SwapchainProperties;
use crate::error;
use crate::types::*;
@@ -24,32 +23,28 @@ use crate::types::*;
/// A complete graphics pipeline and associated resources
pub struct CompletePipeline {
/// Our main render pass
- pub(crate) renderpass: ManuallyDrop<RenderPass>,
+ pub(crate) renderpass: ManuallyDrop<RenderPassT>,
/// The layout of our main graphics pipeline
- pub(crate) pipeline_layout: ManuallyDrop<PipelineLayout>,
+ pub(crate) pipeline_layout: ManuallyDrop<PipelineLayoutT>,
/// Our main graphics pipeline
- pub(crate) pipeline: ManuallyDrop<GraphicsPipeline>,
+ pub(crate) pipeline: ManuallyDrop<GraphicsPipelineT>,
/// The vertex shader module
- pub(crate) vs_module: ManuallyDrop<ShaderModule>,
+ pub(crate) vs_module: ManuallyDrop<ShaderModuleT>,
/// The fragment shader module
- pub(crate) fs_module: ManuallyDrop<ShaderModule>,
+ pub(crate) fs_module: ManuallyDrop<ShaderModuleT>,
}
impl CompletePipeline {
- pub fn new<T>(
- device: &mut Device,
+ pub fn new<'a, T: Iterator<Item = &'a DescriptorSetLayoutT> + std::fmt::Debug>(
+ device: &mut DeviceT,
extent: hal::image::Extent,
swapchain_properties: &SwapchainProperties,
set_layouts: T,
- ) -> Result<Self, error::CreationError>
- where
- T: IntoIterator,
- T::Item: Borrow<DescriptorSetLayout>,
- {
+ ) -> Result<Self, error::CreationError> {
use hal::format::Format;
use hal::pso::*;
@@ -88,7 +83,11 @@ impl CompletePipeline {
};
unsafe {
- device.create_render_pass(&[img_attachment, depth_attachment], &[subpass], &[])
+ device.create_render_pass(
+ IntoIter::new([img_attachment, depth_attachment]),
+ once(subpass),
+ empty(),
+ )
}
.map_err(|_| error::CreationError::OutOfMemoryError)?
};
@@ -150,28 +149,6 @@ impl CompletePipeline {
},
);
- // Shader set
- let shaders = GraphicsShaderSet {
- vertex: vs_entry,
- fragment: Some(fs_entry),
- hull: None,
- domain: None,
- geometry: None,
- };
-
- // Vertex buffers
- let vertex_buffers: Vec<VertexBufferDesc> = vec![VertexBufferDesc {
- binding: 0,
- stride: (size_of::<f32>() * 6) as u32,
- rate: VertexInputRate::Vertex,
- }];
-
- let attributes: Vec<AttributeDesc> = pipeline_vb_attributes!(0,
- size_of::<f32>() * 3; Rgb32Sfloat,
- size_of::<u32>(); R32Sint,
- size_of::<f32>() * 2; Rg32Sfloat
- );
-
// Rasterizer
let rasterizer = Rasterizer {
polygon_mode: PolygonMode::Fill,
@@ -180,6 +157,7 @@ impl CompletePipeline {
depth_clamping: false,
depth_bias: None,
conservative: true,
+ line_width: State::Static(1.0),
};
// Depth stencil
@@ -195,9 +173,9 @@ impl CompletePipeline {
// Pipeline layout
let layout = unsafe {
device.create_pipeline_layout(
- set_layouts,
+ set_layouts.into_iter(),
// vp matrix, 4x4 f32
- &[(ShaderStageFlags::VERTEX, 0..64)],
+ IntoIter::new([(ShaderStageFlags::VERTEX, 0..64)]),
)
}
.map_err(|_| error::CreationError::OutOfMemoryError)?;
@@ -231,18 +209,54 @@ impl CompletePipeline {
depth: (0.0..1.0),
}),
scissor: Some(extent.rect()),
- blend_color: None,
+ blend_constants: None,
depth_bounds: None,
};
- // Input assembler
- let input_assembler = InputAssemblerDesc::new(Primitive::TriangleList);
+ // Primitive assembler
+ let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
+ buffers: &[VertexBufferDesc {
+ binding: 0,
+ stride: (size_of::<f32>() * 6) as u32,
+ rate: VertexInputRate::Vertex,
+ }],
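+ // Layout assumed from the old macro invocation: vec3 position, then an
+ // i32 texture index, then vec2 UV (six 4-byte fields, matching the stride).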
+ attributes: &[
+ AttributeDesc {
+ location: 0,
+ binding: 0,
+ element: Element {
+ format: Format::Rgb32Sfloat,
+ offset: 0,
+ },
+ },
+ AttributeDesc {
+ location: 1,
+ binding: 0,
+ element: Element {
+ format: Format::R32Sint,
+ offset: (size_of::<f32>() * 3) as u32,
+ },
+ },
+ AttributeDesc {
+ location: 2,
+ binding: 0,
+ element: Element {
+ format: Format::Rg32Sfloat,
+ offset: (size_of::<f32>() * 4) as u32,
+ },
+ },
+ ],
+ input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
+ vertex: vs_entry,
+ tessellation: None,
+ geometry: None,
+ };
// Pipeline description
let pipeline_desc = GraphicsPipelineDesc {
- shaders,
+ label: Some("3D"),
rasterizer,
- vertex_buffers,
+ fragment: Some(fs_entry),
blender,
depth_stencil,
multisampling: None,
@@ -251,8 +265,7 @@ impl CompletePipeline {
subpass,
flags: PipelineCreationFlags::empty(),
parent: BasePipeline::None,
- input_assembler,
- attributes,
+ primitive_assembler,
};
// Pipeline
@@ -269,7 +282,7 @@ impl CompletePipeline {
}
/// Deactivate vulkan resources. Use before dropping
- pub fn deactivate(self, device: &mut Device) {
+ pub fn deactivate(self, device: &mut DeviceT) {
unsafe {
use core::ptr::read;
diff --git a/stockton-render/src/draw/queue_negotiator.rs b/stockton-render/src/draw/queue_negotiator.rs
new file mode 100644
index 0000000..b128846
--- /dev/null
+++ b/stockton-render/src/draw/queue_negotiator.rs
@@ -0,0 +1,72 @@
+use crate::types::*;
+use anyhow::Result;
+use hal::queue::family::QueueFamilyId;
+use std::sync::{Arc, RwLock};
+use thiserror::Error;
+
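+/// Negotiates access to queues from one queue family, handing out dedicated
+/// queues while the family still has them and sharing the existing ones
+/// round-robin after that.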
+pub struct QueueNegotiator {
+ family_id: QueueFamilyId,
+ already_allocated: Vec<Arc<RwLock<QueueT>>>,
+ next_share: usize,
+}
+
+impl QueueNegotiator {
+ pub fn find<F: FnMut(&&QueueFamilyT) -> bool>(adapter: &Adapter, filter: F) -> Result<Self> {
+ let family = adapter
+ .queue_families
+ .iter()
+ .find(filter)
+ .ok_or(QueueNegotiatorError::NoSuitableFamilies)?;
+
+ Ok(QueueNegotiator {
+ family_id: family.id(),
+ already_allocated: Vec::with_capacity(family.max_queues()),
+ next_share: 0,
+ })
+ }
+
+ pub fn family<'a>(&self, adapter: &'a Adapter) -> &'a QueueFamilyT {
+ adapter
+ .queue_families
+ .iter()
+ .find(|x| x.id() == self.family_id)
+ .unwrap()
+ }
+
+ pub fn family_id(&self) -> QueueFamilyId {
+ self.family_id
+ }
+
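+ /// Get a queue for this family, popping a fresh one from `groups` if any
+ /// remain and otherwise sharing an already-allocated queue.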
+ pub fn get_queue(&mut self, groups: &mut Vec<QueueGroup>) -> Option<Arc<RwLock<QueueT>>> {
+ match groups
+ .iter()
+ .position(|x| !x.queues.is_empty() && x.family == self.family_id)
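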
+ {
+ Some(idx) => {
+ // At least one remaining queue
+ let queue = groups[idx].queues.pop().unwrap();
+ let queue = Arc::new(RwLock::new(queue));
+
+ self.already_allocated.push(queue.clone());
+
+ Some(queue)
+ }
+ None => {
+ if self.already_allocated.is_empty() {
+ return None;
+ }
+
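+ // Rotate through the queues we've already handed out so sharing stays even.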
+ let queue = self.already_allocated[self.next_share].clone();
+ self.next_share = (self.next_share + 1) % self.already_allocated.len();
+
+ Some(queue)
+ }
+ }
+ }
+}
+
+#[derive(Error, Debug)]
+pub enum QueueNegotiatorError {
+ #[error("No suitable queue families found")]
+ NoSuitableFamilies,
+}
diff --git a/stockton-render/src/draw/render.rs b/stockton-render/src/draw/render.rs
index b2e9f97..2cbdef4 100644
--- a/stockton-render/src/draw/render.rs
+++ b/stockton-render/src/draw/render.rs
@@ -1,10 +1,11 @@
use crate::draw::draw_buffers::INITIAL_INDEX_SIZE;
use crate::draw::draw_buffers::INITIAL_VERT_SIZE;
use crate::draw::UvPoint;
-use arrayvec::ArrayVec;
use faces::FaceType;
-use hal::prelude::*;
-use std::convert::TryInto;
+use std::{
+ convert::TryInto,
+ iter::{empty, once},
+};
use stockton_levels::prelude::*;
use stockton_types::Vector2;
@@ -16,16 +17,14 @@ use super::texture::TextureRepo;
fn draw_or_queue(
current_chunk: usize,
tex_repo: &mut TextureRepo,
- cmd_buffer: &mut CommandBuffer,
- pipeline_layout: &PipelineLayout,
+ cmd_buffer: &mut CommandBufferT,
+ pipeline_layout: &PipelineLayoutT,
chunk_start: u32,
curr_idx_idx: u32,
) {
if let Some(ds) = tex_repo.attempt_get_descriptor_set(current_chunk) {
- let mut descriptor_sets: ArrayVec<[_; 1]> = ArrayVec::new();
- descriptor_sets.push(ds);
unsafe {
- cmd_buffer.bind_graphics_descriptor_sets(pipeline_layout, 0, descriptor_sets, &[]);
+ cmd_buffer.bind_graphics_descriptor_sets(pipeline_layout, 0, once(ds), empty());
cmd_buffer.draw_indexed(chunk_start * 3..(curr_idx_idx * 3) + 1, 0, 0..1);
}
} else {
@@ -34,10 +33,10 @@ fn draw_or_queue(
}
pub fn do_render<M: MinBspFeatures<VulkanSystem>>(
- cmd_buffer: &mut CommandBuffer,
+ cmd_buffer: &mut CommandBufferT,
draw_buffers: &mut DrawBuffers<UvPoint>,
tex_repo: &mut TextureRepo,
- pipeline_layout: &PipelineLayout,
+ pipeline_layout: &PipelineLayoutT,
file: &M,
faces: &[u32],
) {
diff --git a/stockton-render/src/draw/target.rs b/stockton-render/src/draw/target.rs
index 1ee208b..8d308d9 100644
--- a/stockton-render/src/draw/target.rs
+++ b/stockton-render/src/draw/target.rs
@@ -1,14 +1,19 @@
//! Resources needed for drawing on the screen, including sync objects
-use core::{iter::once, mem::ManuallyDrop};
+use std::{
+ array::IntoIter,
+ borrow::Borrow,
+ iter::{empty, once},
+ mem::ManuallyDrop,
+};
use arrayvec::ArrayVec;
use hal::{
- format::{ChannelType, Format, Swizzle},
- image::{Extent, Usage as ImgUsage, ViewKind},
- prelude::*,
+ buffer::SubRange,
+ command::RenderAttachmentInfo,
+ format::{ChannelType, Format},
+ image::{Extent, FramebufferAttachment, Usage as ImgUsage, ViewCapabilities},
pso::Viewport,
- queue::Submission,
window::{CompositeAlphaMode, Extent2D, PresentMode, SwapchainConfig},
};
use na::Mat4;
@@ -22,13 +27,6 @@ use super::{
};
use crate::types::*;
-/// Defines the colour range we use.
-const COLOR_RANGE: hal::image::SubresourceRange = hal::image::SubresourceRange {
- aspects: hal::format::Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
-};
-
#[derive(Debug, Clone)]
pub struct SwapchainProperties {
pub format: Format,
@@ -49,7 +47,7 @@ pub enum NoSupportedValuesError {
impl SwapchainProperties {
pub fn find_best(
adapter: &Adapter,
- surface: &Surface,
+ surface: &SurfaceT,
) -> Result<SwapchainProperties, NoSupportedValuesError> {
let caps = surface.capabilities(&adapter.physical_device);
let formats = surface.supported_formats(&adapter.physical_device);
@@ -124,8 +122,8 @@ impl SwapchainProperties {
}
pub struct TargetChain {
- /// Swapchain we're targeting
- pub swapchain: ManuallyDrop<Swapchain>,
+ /// Surface we're targeting
+ pub surface: ManuallyDrop<SurfaceT>,
pub properties: SwapchainProperties,
@@ -147,16 +145,14 @@ pub struct TargetChain {
}
impl TargetChain {
- #[allow(clippy::too_many_arguments)]
pub fn new(
- device: &mut Device,
+ device: &mut DeviceT,
adapter: &Adapter,
- surface: &mut Surface,
+ mut surface: SurfaceT,
pipeline: &CompletePipeline,
ui_pipeline: &UiPipeline,
- cmd_pool: &mut CommandPool,
+ cmd_pool: &mut CommandPoolT,
properties: SwapchainProperties,
- old_swapchain: Option<Swapchain>,
) -> Result<TargetChain, TargetChainCreationError> {
let caps = surface.capabilities(&adapter.physical_device);
@@ -181,13 +177,6 @@ impl TargetChain {
image_usage: ImgUsage::COLOR_ATTACHMENT,
};
- // Swapchain
- let (swapchain, mut backbuffer) = unsafe {
- device
- .create_swapchain(surface, swap_config, old_swapchain)
- .map_err(|_| TargetChainCreationError::Todo)?
- };
-
let depth_buffer = {
use hal::format::Aspects;
use hal::image::SubresourceRange;
@@ -199,8 +188,10 @@ impl TargetChain {
ImgUsage::DEPTH_STENCIL_ATTACHMENT,
SubresourceRange {
aspects: Aspects::DEPTH,
- levels: 0..1,
- layers: 0..1,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
},
properties.extent.width as usize,
properties.extent.height as usize,
@@ -208,17 +199,24 @@ impl TargetChain {
.map_err(|_| TargetChainCreationError::Todo)
}?;
- let mut targets: Vec<TargetResources> = Vec::with_capacity(backbuffer.len());
- let mut sync_objects: Vec<SyncObjects> = Vec::with_capacity(backbuffer.len());
- for image in backbuffer.drain(..) {
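+ // Framebuffers are now created from attachment descriptions only; the
+ // concrete image views are supplied each frame in begin_render_pass.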
+ let fat = swap_config.framebuffer_attachment();
+ let mut targets: Vec<TargetResources> =
+ Vec::with_capacity(swap_config.image_count as usize);
+ let mut sync_objects: Vec<SyncObjects> =
+ Vec::with_capacity(swap_config.image_count as usize);
+ for _ in 0..swap_config.image_count {
targets.push(
TargetResources::new(
device,
cmd_pool,
&pipeline.renderpass,
&ui_pipeline.renderpass,
- image,
- &(*depth_buffer.image_view),
+ fat.clone(),
+ FramebufferAttachment {
+ usage: ImgUsage::DEPTH_STENCIL_ATTACHMENT,
+ view_caps: ViewCapabilities::empty(),
+ format: properties.depth_format,
+ },
&properties,
)
.map_err(|_| TargetChainCreationError::Todo)?,
@@ -228,8 +226,15 @@ impl TargetChain {
.push(SyncObjects::new(device).map_err(|_| TargetChainCreationError::Todo)?);
}
+ // Configure Swapchain
+ unsafe {
+ surface
+ .configure_swapchain(device, swap_config)
+ .map_err(|_| TargetChainCreationError::Todo)?;
+ }
+
Ok(TargetChain {
- swapchain: ManuallyDrop::new(swapchain),
+ surface: ManuallyDrop::new(surface),
targets: targets.into_boxed_slice(),
sync_objects: sync_objects.into_boxed_slice(),
depth_buffer: ManuallyDrop::new(depth_buffer),
@@ -239,19 +244,24 @@ impl TargetChain {
})
}
- pub fn deactivate(self, device: &mut Device, cmd_pool: &mut CommandPool) {
- let swapchain = self.deactivate_with_recyling(device, cmd_pool);
+ pub fn deactivate(
+ self,
+ instance: &mut InstanceT,
+ device: &mut DeviceT,
+ cmd_pool: &mut CommandPoolT,
+ ) {
+ let surface = self.deactivate_with_recyling(device, cmd_pool);
unsafe {
- device.destroy_swapchain(swapchain);
+ instance.destroy_surface(surface);
}
}
pub fn deactivate_with_recyling(
- self,
- device: &mut Device,
- cmd_pool: &mut CommandPool,
- ) -> Swapchain {
+ mut self,
+ device: &mut DeviceT,
+ cmd_pool: &mut CommandPoolT,
+ ) -> SurfaceT {
use core::ptr::read;
unsafe {
ManuallyDrop::into_inner(read(&self.depth_buffer)).deactivate(device);
@@ -263,84 +273,102 @@ impl TargetChain {
for i in 0..self.sync_objects.len() {
read(&self.sync_objects[i]).deactivate(device);
}
+
+ self.surface.unconfigure_swapchain(device);
}
- unsafe { ManuallyDrop::into_inner(read(&self.swapchain)) }
+ unsafe { ManuallyDrop::into_inner(read(&self.surface)) }
}
pub fn prep_next_target<'a>(
&'a mut self,
- device: &mut Device,
+ device: &mut DeviceT,
draw_buffers: &mut DrawBuffers<UvPoint>,
pipeline: &CompletePipeline,
vp: &Mat4,
- ) -> Result<&'a mut crate::types::CommandBuffer, &'static str> {
+ ) -> Result<
+ (
+ &'a mut crate::types::CommandBufferT,
+ <SurfaceT as PresentationSurface<back::Backend>>::SwapchainImage,
+ ),
+ &'static str,
+ > {
self.last_syncs = (self.last_syncs + 1) % self.sync_objects.len();
let syncs = &mut self.sync_objects[self.last_syncs];
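+ // PresentationSurface::acquire_image doesn't return an image index, so
+ // cycle through our per-frame target resources manually.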
+ self.last_image = (self.last_image + 1) % self.targets.len() as u32;
+
+ let target = &mut self.targets[self.last_image as usize];
+
// Get the image
- let (image_index, _) = unsafe {
- self.swapchain
- .acquire_image(core::u64::MAX, Some(&syncs.get_image), None)
+ let (img, _) = unsafe {
+ self.surface
+ .acquire_image(core::u64::MAX)
.map_err(|_| "FrameError::AcquireError")?
};
- self.last_image = image_index;
-
- let target = &mut self.targets[image_index as usize];
-
// Make sure whatever was last using this has finished
unsafe {
device
.wait_for_fence(&syncs.present_complete, core::u64::MAX)
.map_err(|_| "FrameError::SyncObjectError")?;
device
- .reset_fence(&syncs.present_complete)
+ .reset_fence(&mut syncs.present_complete)
.map_err(|_| "FrameError::SyncObjectError")?;
};
// Record commands
unsafe {
- use hal::buffer::IndexBufferView;
use hal::command::{
ClearColor, ClearDepthStencil, ClearValue, CommandBufferFlags, SubpassContents,
};
use hal::pso::ShaderStageFlags;
- // Colour to clear window to
- let clear_values = [
- ClearValue {
- color: ClearColor {
- float32: [0.0, 0.0, 0.0, 1.0],
- },
- },
- ClearValue {
- depth_stencil: ClearDepthStencil {
- depth: 1.0,
- stencil: 0,
- },
- },
- ];
-
// Get references to our buffers
let (vbufs, ibuf) = {
let vbufref: &<back::Backend as hal::Backend>::Buffer =
draw_buffers.vertex_buffer.get_buffer();
- let vbufs: ArrayVec<[_; 1]> = [(vbufref, 0)].into();
+ let vbufs: ArrayVec<[_; 1]> = [(
+ vbufref,
+ SubRange {
+ offset: 0,
+ size: None,
+ },
+ )]
+ .into();
let ibuf = draw_buffers.index_buffer.get_buffer();
(vbufs, ibuf)
};
- target.cmd_buffer.begin_primary(CommandBufferFlags::EMPTY);
+ target.cmd_buffer.begin_primary(CommandBufferFlags::empty());
// Main render pass / pipeline
target.cmd_buffer.begin_render_pass(
&pipeline.renderpass,
&target.framebuffer,
self.properties.viewport.rect,
- clear_values.iter(),
+ vec![
+ RenderAttachmentInfo {
+ image_view: img.borrow(),
+ clear_value: ClearValue {
+ color: ClearColor {
+ float32: [0.0, 0.0, 0.0, 1.0],
+ },
+ },
+ },
+ RenderAttachmentInfo {
+ image_view: &*self.depth_buffer.image_view,
+ clear_value: ClearValue {
+ depth_stencil: ClearDepthStencil {
+ depth: 1.0,
+ stencil: 0,
+ },
+ },
+ },
+ ]
+ .into_iter(),
SubpassContents::Inline,
);
target.cmd_buffer.bind_graphics_pipeline(&pipeline.pipeline);
@@ -356,22 +384,26 @@ impl TargetChain {
);
// Bind buffers
- target.cmd_buffer.bind_vertex_buffers(0, vbufs);
- target.cmd_buffer.bind_index_buffer(IndexBufferView {
- buffer: ibuf,
- offset: 0,
- index_type: hal::IndexType::U16,
- });
+ target.cmd_buffer.bind_vertex_buffers(0, vbufs.into_iter());
+ target.cmd_buffer.bind_index_buffer(
+ &ibuf,
+ SubRange {
+ offset: 0,
+ size: None,
+ },
+ hal::IndexType::U16,
+ );
};
- Ok(&mut target.cmd_buffer)
+ Ok((&mut target.cmd_buffer, img))
}
pub fn target_2d_pass<'a>(
&'a mut self,
draw_buffers: &mut DrawBuffers<UiPoint>,
+ img: &<SurfaceT as PresentationSurface<back::Backend>>::SwapchainImage,
pipeline: &UiPipeline,
- ) -> Result<&'a mut CommandBuffer, &'static str> {
+ ) -> Result<&'a mut CommandBufferT, &'static str> {
let target = &mut self.targets[self.last_image as usize];
unsafe {
@@ -381,28 +413,27 @@ impl TargetChain {
target.cmd_buffer.pipeline_barrier(
PipelineStage::BOTTOM_OF_PIPE..PipelineStage::TOP_OF_PIPE,
hal::memory::Dependencies::empty(),
- &[],
+ std::iter::empty(),
);
}
// Record commands
unsafe {
- use hal::buffer::IndexBufferView;
use hal::command::{ClearColor, ClearValue, SubpassContents};
- // Colour to clear window to
- let clear_values = [ClearValue {
- color: ClearColor {
- float32: [1.0, 0.0, 0.0, 1.5],
- },
- }];
-
// Get references to our buffers
let (vbufs, ibuf) = {
let vbufref: &<back::Backend as hal::Backend>::Buffer =
draw_buffers.vertex_buffer.get_buffer();
- let vbufs: ArrayVec<[_; 1]> = [(vbufref, 0)].into();
+ let vbufs: ArrayVec<[_; 1]> = [(
+ vbufref,
+ SubRange {
+ offset: 0,
+ size: None,
+ },
+ )]
+ .into();
let ibuf = draw_buffers.index_buffer.get_buffer();
(vbufs, ibuf)
@@ -413,18 +444,29 @@ impl TargetChain {
&pipeline.renderpass,
&target.framebuffer_2d,
self.properties.viewport.rect,
- clear_values.iter(),
+ vec![RenderAttachmentInfo {
+ image_view: img.borrow(),
+ clear_value: ClearValue {
+ color: ClearColor {
+ float32: [0.0, 0.0, 0.0, 1.0],
+ },
+ },
+ }]
+ .into_iter(),
SubpassContents::Inline,
);
target.cmd_buffer.bind_graphics_pipeline(&pipeline.pipeline);
// Bind buffers
- target.cmd_buffer.bind_vertex_buffers(0, vbufs);
- target.cmd_buffer.bind_index_buffer(IndexBufferView {
- buffer: ibuf,
- offset: 0,
- index_type: hal::IndexType::U16,
- });
+ target.cmd_buffer.bind_vertex_buffers(0, vbufs.into_iter());
+ target.cmd_buffer.bind_index_buffer(
+ &ibuf,
+ SubRange {
+ offset: 0,
+ size: None,
+ },
+ hal::IndexType::U16,
+ );
};
Ok(&mut target.cmd_buffer)
@@ -432,7 +474,8 @@ impl TargetChain {
pub fn finish_and_submit_target(
&mut self,
- command_queue: &mut CommandQueue,
+ img: <SurfaceT as PresentationSurface<back::Backend>>::SwapchainImage,
+ command_queue: &mut QueueT,
) -> Result<(), &'static str> {
let syncs = &mut self.sync_objects[self.last_syncs];
let target = &mut self.targets[self.last_image as usize];
@@ -442,31 +485,16 @@ impl TargetChain {
target.cmd_buffer.finish();
}
- // Make submission object
- let command_buffers: std::iter::Once<&CommandBuffer> = once(&target.cmd_buffer);
- let wait_semaphores: std::iter::Once<(&Semaphore, hal::pso::PipelineStage)> = once((
- &syncs.get_image,
- hal::pso::PipelineStage::COLOR_ATTACHMENT_OUTPUT,
- ));
- let signal_semaphores: std::iter::Once<&Semaphore> = once(&syncs.render_complete);
-
- let present_wait_semaphores: std::iter::Once<&Semaphore> = once(&syncs.render_complete);
-
- let submission = Submission {
- command_buffers,
- wait_semaphores,
- signal_semaphores,
- };
-
// Submit it
unsafe {
- command_queue.submit(submission, Some(&syncs.present_complete));
- self.swapchain
- .present(
- command_queue,
- self.last_image as u32,
- present_wait_semaphores,
- )
+ command_queue.submit(
+ once(&*target.cmd_buffer),
+ empty(),
+ once(&*syncs.render_complete),
+ Some(&mut syncs.present_complete),
+ );
+ command_queue
+ .present(&mut self.surface, img, Some(&mut *syncs.render_complete))
.map_err(|_| "FrameError::PresentError")?;
};
@@ -477,53 +505,34 @@ impl TargetChain {
/// Resources for a single target frame, including sync objects
pub struct TargetResources {
/// Command buffer to use when drawing
- pub cmd_buffer: ManuallyDrop<CommandBuffer>,
-
- /// The image for this frame
- pub image: ManuallyDrop<Image>,
-
- /// Imageviews for this frame
- pub imageview: ManuallyDrop<ImageView>,
+ pub cmd_buffer: ManuallyDrop<CommandBufferT>,
/// Framebuffer for this frame
- pub framebuffer: ManuallyDrop<Framebuffer>,
+ pub framebuffer: ManuallyDrop<FramebufferT>,
/// Framebuffer for this frame when drawing in 2D
- pub framebuffer_2d: ManuallyDrop<Framebuffer>,
+ pub framebuffer_2d: ManuallyDrop<FramebufferT>,
}
impl TargetResources {
pub fn new(
- device: &mut Device,
- cmd_pool: &mut CommandPool,
- renderpass: &RenderPass,
- renderpass_2d: &RenderPass,
- image: Image,
- depth_pass: &ImageView,
+ device: &mut DeviceT,
+ cmd_pool: &mut CommandPoolT,
+ renderpass: &RenderPassT,
+ renderpass_2d: &RenderPassT,
+ fat: FramebufferAttachment,
+ dat: FramebufferAttachment,
properties: &SwapchainProperties,
) -> Result<TargetResources, TargetResourcesCreationError> {
// Command Buffer
let cmd_buffer = unsafe { cmd_pool.allocate_one(hal::command::Level::Primary) };
- // ImageView
- let imageview = unsafe {
- device
- .create_image_view(
- &image,
- ViewKind::D2,
- properties.format,
- Swizzle::NO,
- COLOR_RANGE.clone(),
- )
- .map_err(TargetResourcesCreationError::ImageViewError)?
- };
-
// Framebuffer
let framebuffer = unsafe {
device
.create_framebuffer(
&renderpass,
- once(&imageview).chain(once(depth_pass)),
+ IntoIter::new([fat.clone(), dat]),
properties.extent,
)
.map_err(|_| TargetResourcesCreationError::FrameBufferNoMemory)?
@@ -532,48 +541,39 @@ impl TargetResources {
// 2D framebuffer just needs the imageview, not the depth pass
let framebuffer_2d = unsafe {
device
- .create_framebuffer(&renderpass_2d, once(&imageview), properties.extent)
+ .create_framebuffer(&renderpass_2d, once(fat), properties.extent)
.map_err(|_| TargetResourcesCreationError::FrameBufferNoMemory)?
};
Ok(TargetResources {
cmd_buffer: ManuallyDrop::new(cmd_buffer),
- image: ManuallyDrop::new(image),
- imageview: ManuallyDrop::new(imageview),
framebuffer: ManuallyDrop::new(framebuffer),
framebuffer_2d: ManuallyDrop::new(framebuffer_2d),
})
}
- pub fn deactivate(self, device: &mut Device, cmd_pool: &mut CommandPool) {
+ pub fn deactivate(self, device: &mut DeviceT, cmd_pool: &mut CommandPoolT) {
use core::ptr::read;
unsafe {
cmd_pool.free(once(ManuallyDrop::into_inner(read(&self.cmd_buffer))));
device.destroy_framebuffer(ManuallyDrop::into_inner(read(&self.framebuffer)));
device.destroy_framebuffer(ManuallyDrop::into_inner(read(&self.framebuffer_2d)));
- device.destroy_image_view(ManuallyDrop::into_inner(read(&self.imageview)));
}
}
}
pub struct SyncObjects {
- /// Triggered when the image is ready to draw to
- pub get_image: ManuallyDrop<Semaphore>,
-
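+    // get_image is gone: PresentationSurface::acquire_image waits internally,
+    // so no acquire semaphore is needed.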
/// Triggered when rendering is done
- pub render_complete: ManuallyDrop<Semaphore>,
+ pub render_complete: ManuallyDrop<SemaphoreT>,
/// Triggered when the image is on screen
- pub present_complete: ManuallyDrop<Fence>,
+ pub present_complete: ManuallyDrop<FenceT>,
}
impl SyncObjects {
- pub fn new(device: &mut Device) -> Result<Self, TargetResourcesCreationError> {
+ pub fn new(device: &mut DeviceT) -> Result<Self, TargetResourcesCreationError> {
// Sync objects
- let get_image = device
- .create_semaphore()
- .map_err(|_| TargetResourcesCreationError::SyncObjectsNoMemory)?;
let render_complete = device
.create_semaphore()
.map_err(|_| TargetResourcesCreationError::SyncObjectsNoMemory)?;
@@ -582,17 +582,15 @@ impl SyncObjects {
.map_err(|_| TargetResourcesCreationError::SyncObjectsNoMemory)?;
Ok(SyncObjects {
- get_image: ManuallyDrop::new(get_image),
render_complete: ManuallyDrop::new(render_complete),
present_complete: ManuallyDrop::new(present_complete),
})
}
- pub fn deactivate(self, device: &mut Device) {
+ pub fn deactivate(self, device: &mut DeviceT) {
use core::ptr::read;
unsafe {
- device.destroy_semaphore(ManuallyDrop::into_inner(read(&self.get_image)));
device.destroy_semaphore(ManuallyDrop::into_inner(read(&self.render_complete)));
device.destroy_fence(ManuallyDrop::into_inner(read(&self.present_complete)));
}
@@ -606,7 +604,7 @@ pub enum TargetChainCreationError {
#[derive(Debug)]
pub enum TargetResourcesCreationError {
- ImageViewError(hal::image::ViewError),
+ ImageViewError,
FrameBufferNoMemory,
SyncObjectsNoMemory,
}
diff --git a/stockton-render/src/draw/texture/block.rs b/stockton-render/src/draw/texture/block.rs
index 7735f5c..5ac3a94 100644
--- a/stockton-render/src/draw/texture/block.rs
+++ b/stockton-render/src/draw/texture/block.rs
@@ -2,7 +2,6 @@ use super::{loader::BlockRef, repo::BLOCK_SIZE};
use crate::types::*;
use arrayvec::ArrayVec;
-use hal::prelude::*;
use rendy_memory::{Allocator, Block};
use std::{iter::once, mem::ManuallyDrop};
@@ -15,7 +14,7 @@ pub struct TexturesBlock<B: Block<back::Backend>> {
impl<B: Block<back::Backend>> TexturesBlock<B> {
pub fn deactivate<T: Allocator<back::Backend, Block = B>>(
mut self,
- device: &mut Device,
+ device: &mut DeviceT,
tex_alloc: &mut T,
desc_alloc: &mut DescriptorAllocator,
) {
@@ -36,9 +35,9 @@ impl<B: Block<back::Backend>> TexturesBlock<B> {
pub struct LoadedImage<B: Block<back::Backend>> {
pub mem: ManuallyDrop<B>,
- pub img: ManuallyDrop<Image>,
- pub img_view: ManuallyDrop<ImageView>,
- pub sampler: ManuallyDrop<Sampler>,
+ pub img: ManuallyDrop<ImageT>,
+ pub img_view: ManuallyDrop<ImageViewT>,
+ pub sampler: ManuallyDrop<SamplerT>,
pub row_size: usize,
pub height: u32,
pub width: u32,
@@ -47,7 +46,7 @@ pub struct LoadedImage<B: Block<back::Backend>> {
impl<B: Block<back::Backend>> LoadedImage<B> {
pub fn deactivate<T: Allocator<back::Backend, Block = B>>(
self,
- device: &mut Device,
+ device: &mut DeviceT,
alloc: &mut T,
) {
unsafe {
diff --git a/stockton-render/src/draw/texture/load.rs b/stockton-render/src/draw/texture/load.rs
index 7ca07cb..be1aa27 100644
--- a/stockton-render/src/draw/texture/load.rs
+++ b/stockton-render/src/draw/texture/load.rs
@@ -2,7 +2,7 @@ use super::{
block::LoadedImage, block::TexturesBlock, loader::TextureLoader, repo::BLOCK_SIZE,
resolver::TextureResolver, staging_buffer::StagingBuffer, LoadableImage, PIXEL_SIZE,
};
-use crate::types::*;
+use crate::{error::LockPoisoned, types::*};
use stockton_levels::prelude::*;
use anyhow::{Context, Result};
@@ -11,17 +11,22 @@ use hal::{
command::{BufferImageCopy, CommandBufferFlags},
format::{Aspects, Format, Swizzle},
image::{
- Extent, Filter, Layout, Offset, SamplerDesc, SubresourceLayers, SubresourceRange,
+ Access, Extent, Filter, Layout, Offset, SamplerDesc, SubresourceLayers, SubresourceRange,
Usage as ImgUsage, ViewKind, WrapMode,
},
- memory::{Barrier, Dependencies},
- prelude::*,
- pso::{Descriptor, DescriptorSetWrite, PipelineStage, ShaderStageFlags},
- queue::Submission,
+ memory::{Barrier, Dependencies, SparseFlags},
+ pso::{Descriptor, DescriptorSetWrite, ImageDescriptorType, PipelineStage, ShaderStageFlags},
+ MemoryTypeId,
};
+use image::{Rgba, RgbaImage};
use rendy_descriptor::{DescriptorRanges, DescriptorSetLayoutBinding, DescriptorType};
use rendy_memory::{Allocator, Block};
-use std::mem::ManuallyDrop;
+use std::{
+ array::IntoIter,
+ iter::{empty, once},
+ mem::ManuallyDrop,
+ sync::{Arc, RwLock},
+};
use thiserror::Error;
#[derive(Error, Debug)]
@@ -29,16 +34,27 @@ pub enum TextureLoadError {
#[error("No available resources")]
NoResources,
- #[error("Texture is not in map")]
- NotInMap(usize),
-
#[error("Texture could not be resolved")]
ResolveFailed(usize),
}
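+// Every loaded texture uses the same format and a single mip level and layer.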
+const FORMAT: Format = Format::Rgba8Srgb;
+const RESOURCES: SubresourceRange = SubresourceRange {
+ aspects: Aspects::COLOR,
+ level_start: 0,
+ level_count: Some(1),
+ layer_start: 0,
+ layer_count: Some(1),
+};
+const LAYERS: SubresourceLayers = SubresourceLayers {
+ aspects: Aspects::COLOR,
+ level: 0,
+ layers: 0..1,
+};
+
pub struct QueuedLoad<B: Block<back::Backend>> {
- pub fence: Fence,
- pub buf: CommandBuffer,
+ pub fence: FenceT,
+ pub buf: CommandBufferT,
pub block: TexturesBlock<B>,
pub staging_bufs: ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
}
@@ -47,7 +63,7 @@ impl<B: Block<back::Backend>> QueuedLoad<B> {
pub fn dissolve(
self,
) -> (
- (Fence, CommandBuffer),
+ (FenceT, CommandBufferT),
ArrayVec<[StagingBuffer; BLOCK_SIZE]>,
TexturesBlock<B>,
) {
@@ -56,18 +72,6 @@ impl<B: Block<back::Backend>> QueuedLoad<B> {
}
impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R, I> {
- const FORMAT: Format = Format::Rgba8Srgb;
- const RESOURCES: SubresourceRange = SubresourceRange {
- aspects: Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
- };
- const LAYERS: SubresourceLayers = SubresourceLayers {
- aspects: Aspects::COLOR,
- level: 0,
- layers: 0..1,
- };
-
pub(crate) unsafe fn attempt_queue_load(
&mut self,
block_ref: usize,
@@ -78,17 +82,21 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
.map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
- let textures = self.textures.read().unwrap();
+ let textures = self
+ .textures
+ .read()
+ .map_err(|_| LockPoisoned::Map)
+ .context("Error getting map lock")?;
// Get assets to use
- let (fence, mut buf) = self
+ let (mut fence, mut buf) = self
.buffers
.pop_front()
.ok_or(TextureLoadError::NoResources)
.context("Error getting resources to use")?;
// Create descriptor set
- let descriptor_set = {
+ let mut descriptor_set = {
let mut v: ArrayVec<[RDescriptorSet; 1]> = ArrayVec::new();
self.descriptor_allocator
.allocate(
@@ -97,7 +105,11 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
DescriptorRanges::from_bindings(&[
DescriptorSetLayoutBinding {
binding: 0,
- ty: DescriptorType::SampledImage,
+ ty: DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
count: BLOCK_SIZE,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
@@ -113,7 +125,6 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
1,
&mut v,
)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error creating descriptor set")?;
v.pop().unwrap()
@@ -122,7 +133,6 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
// Get a command buffer
buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
- let mut copy_cmds: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new();
let mut imgs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new();
let mut staging_bufs: ArrayVec<[_; BLOCK_SIZE]> = ArrayVec::new();
@@ -131,135 +141,85 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
// Get texture and Resolve image
let tex = textures.get_texture(tex_idx as u32);
if tex.is_none() {
- break; // Past the end
- // TODO: We should actually write blank descriptors
+ // Write the blank image and sampler descriptors so this slot stays valid to sample
+ device.write_descriptor_set(DescriptorSetWrite {
+ set: descriptor_set.raw_mut(),
+ binding: 0,
+ array_offset: tex_idx % BLOCK_SIZE,
+ descriptors: once(Descriptor::Image(
+ &*self.blank_image.img_view,
+ Layout::ShaderReadOnlyOptimal,
+ )),
+ });
+ device.write_descriptor_set(DescriptorSetWrite {
+ set: descriptor_set.raw_mut(),
+ binding: 1,
+ array_offset: tex_idx % BLOCK_SIZE,
+ descriptors: once(Descriptor::Sampler(&*self.blank_image.sampler)),
+ });
+
+ continue;
}
- let tex = tex.ok_or(TextureLoadError::NotInMap(tex_idx))?;
+
+ let tex = tex.unwrap();
let img_data = self
.resolver
.resolve(tex)
.ok_or(TextureLoadError::ResolveFailed(tex_idx))?;
+ let array_offset = tex_idx % BLOCK_SIZE;
- // Calculate buffer size
- let (row_size, total_size) =
- tex_size_info(&img_data, self.optimal_buffer_copy_pitch_alignment);
-
- // Create staging buffer
- let mut staging_buffer = StagingBuffer::new(
+ let (staging_buffer, img) = load_image(
&mut device,
&mut self.staging_allocator,
- total_size as u64,
+ &mut self.tex_allocator,
self.staging_memory_type,
- )
- .context("Error creating staging buffer")?;
-
- // Write to staging buffer
- let mapped_memory = staging_buffer
- .map_memory(&mut device)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
- .context("Error mapping staged memory")?;
-
- img_data.copy_into(mapped_memory, row_size);
-
- staging_buffer.unmap_memory(&mut device);
-
- // Create image
- let (img_mem, img) = create_image_view(
- &mut device,
- &mut *self.tex_allocator,
- Self::FORMAT,
- ImgUsage::SAMPLED,
- &img_data,
- )
- .context("Error creating image")?;
-
- // Create image view
- let img_view = device
- .create_image_view(
- &img,
- ViewKind::D2,
- Self::FORMAT,
- Swizzle::NO,
- Self::RESOURCES,
- )
- .map_err::<HalErrorWrapper, _>(|e| e.into())
- .context("Error creating image view")?;
-
- // Queue copy from buffer to image
- copy_cmds.push(BufferImageCopy {
- buffer_offset: 0,
- buffer_width: (row_size / super::PIXEL_SIZE) as u32,
- buffer_height: img_data.height(),
- image_layers: Self::LAYERS,
- image_offset: Offset { x: 0, y: 0, z: 0 },
- image_extent: Extent {
- width: img_data.width(),
- height: img_data.height(),
- depth: 1,
- },
- });
-
- // Create sampler
- let sampler = device
- .create_sampler(&SamplerDesc::new(Filter::Nearest, WrapMode::Tile))
- .map_err::<HalErrorWrapper, _>(|e| e.into())
- .context("Error creating sampler")?;
+ self.optimal_buffer_copy_pitch_alignment,
+ img_data,
+ )?;
// Write to descriptor set
{
- device.write_descriptor_sets(vec![
- DescriptorSetWrite {
- set: descriptor_set.raw(),
- binding: 0,
- array_offset: tex_idx % BLOCK_SIZE,
- descriptors: Some(Descriptor::Image(
- &img_view,
- Layout::ShaderReadOnlyOptimal,
- )),
- },
- DescriptorSetWrite {
- set: descriptor_set.raw(),
- binding: 1,
- array_offset: tex_idx % BLOCK_SIZE,
- descriptors: Some(Descriptor::Sampler(&sampler)),
- },
- ]);
+ device.write_descriptor_set(DescriptorSetWrite {
+ set: descriptor_set.raw_mut(),
+ binding: 0,
+ array_offset,
+ descriptors: once(Descriptor::Image(
+ &*img.img_view,
+ Layout::ShaderReadOnlyOptimal,
+ )),
+ });
+ device.write_descriptor_set(DescriptorSetWrite {
+ set: descriptor_set.raw_mut(),
+ binding: 1,
+ array_offset,
+ descriptors: once(Descriptor::Sampler(&*img.sampler)),
+ });
}
- imgs.push(LoadedImage {
- mem: ManuallyDrop::new(img_mem),
- img: ManuallyDrop::new(img),
- img_view: ManuallyDrop::new(img_view),
- sampler: ManuallyDrop::new(sampler),
- row_size,
- height: img_data.height(),
- width: img_data.width(),
- });
+ imgs.push(img);
staging_bufs.push(staging_buffer);
}
- // Add start pipeline barriers
- for li in imgs.iter() {
- use hal::image::Access;
-
- buf.pipeline_barrier(
- PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
- Dependencies::empty(),
- &[Barrier::Image {
- states: (Access::empty(), Layout::Undefined)
- ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
- target: &*li.img,
- families: None,
- range: SubresourceRange {
- aspects: Aspects::COLOR,
- levels: 0..1,
- layers: 0..1,
- },
- }],
- );
- }
+ // Record a single barrier moving every new image into TransferDstOptimal
+ buf.pipeline_barrier(
+ PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
+ Dependencies::empty(),
+ imgs.iter().map(|li| Barrier::Image {
+ states: (Access::empty(), Layout::Undefined)
+ ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
+ target: &*li.img,
+ families: None,
+ range: SubresourceRange {
+ aspects: Aspects::COLOR,
+ level_start: 0,
+ level_count: None,
+ layer_start: 0,
+ layer_count: None,
+ },
+ }),
+ );
// Record copy commands
for (li, sb) in imgs.iter().zip(staging_bufs.iter()) {
@@ -267,7 +227,7 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
&*sb.buf,
&*li.img,
Layout::TransferDstOptimal,
- &[BufferImageCopy {
+ once(BufferImageCopy {
buffer_offset: 0,
buffer_width: (li.row_size / super::PIXEL_SIZE) as u32,
buffer_height: li.height,
@@ -282,36 +242,29 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
height: li.height,
depth: 1,
},
- }],
- );
- }
- for li in imgs.iter() {
- use hal::image::Access;
-
- buf.pipeline_barrier(
- PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
- Dependencies::empty(),
- &[Barrier::Image {
- states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
- ..(Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
- target: &*li.img,
- families: None,
- range: Self::RESOURCES,
- }],
+ }),
);
}
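+ // Transition every image to ShaderReadOnlyOptimal once its copy finishes.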
+ buf.pipeline_barrier(
+ PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE,
+ Dependencies::empty(),
+ imgs.iter().map(|li| Barrier::Image {
+ states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
+ ..(Access::empty(), Layout::ShaderReadOnlyOptimal),
+ target: &*li.img,
+ families: None,
+ range: RESOURCES,
+ }),
+ );
buf.finish();
// Submit command buffer
- self.gpu.queue_groups[self.cmd_queue_idx].queues[0].submit::<_, _, Semaphore, _, _>(
- Submission {
- command_buffers: &[&buf],
- signal_semaphores: std::iter::empty(),
- wait_semaphores: std::iter::empty(),
- },
- Some(&fence),
- );
+ {
+ let mut queue = self.queue.write().map_err(|_| LockPoisoned::Queue)?;
+
+ queue.submit(IntoIter::new([&buf]), empty(), empty(), Some(&mut fence));
+ }
Ok(QueuedLoad {
staging_bufs,
@@ -324,6 +277,107 @@ impl<'a, T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<
},
})
}
+
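+ /// Create a 1x1 transparent image used to fill descriptor slots that have
+ /// no real texture bound, blocking until the upload completes.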
+ pub(crate) unsafe fn get_blank_image(
+ device: &mut DeviceT,
+ buf: &mut CommandBufferT,
+ queue_lock: &Arc<RwLock<QueueT>>,
+ staging_allocator: &mut DynamicAllocator,
+ tex_allocator: &mut DynamicAllocator,
+ staging_memory_type: MemoryTypeId,
+ obcpa: u64,
+ ) -> Result<LoadedImage<DynamicBlock>> {
+ let img_data = RgbaImage::from_pixel(1, 1, Rgba([0, 0, 0, 0]));
+
+ let height = img_data.height();
+ let width = img_data.width();
+ let row_alignment_mask = obcpa as u32 - 1;
+ let initial_row_size = PIXEL_SIZE * img_data.width() as usize;
+ let row_size =
+ ((initial_row_size as u32 + row_alignment_mask) & !row_alignment_mask) as usize;
+
+ let (staging_buffer, img) = load_image(
+ device,
+ staging_allocator,
+ tex_allocator,
+ staging_memory_type,
+ obcpa,
+ img_data,
+ )?;
+
+ buf.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
+
+ buf.pipeline_barrier(
+ PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
+ Dependencies::empty(),
+ once(Barrier::Image {
+ states: (Access::empty(), Layout::Undefined)
+ ..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
+ target: &*img.img,
+ families: None,
+ range: SubresourceRange {
+ aspects: Aspects::COLOR,
+ level_start: 0,
+ level_count: None,
+ layer_start: 0,
+ layer_count: None,
+ },
+ }),
+ );
+ buf.copy_buffer_to_image(
+ &*staging_buffer.buf,
+ &*img.img,
+ Layout::TransferDstOptimal,
+ once(BufferImageCopy {
+ buffer_offset: 0,
+ buffer_width: (row_size / super::PIXEL_SIZE) as u32,
+ buffer_height: height,
+ image_layers: LAYERS,
+ image_offset: Offset { x: 0, y: 0, z: 0 },
+ image_extent: Extent {
+ width,
+ height,
+ depth: 1,
+ },
+ }),
+ );
+
+ buf.pipeline_barrier(
+ PipelineStage::TRANSFER..PipelineStage::BOTTOM_OF_PIPE,
+ Dependencies::empty(),
+ once(Barrier::Image {
+ states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
+ ..(Access::empty(), Layout::ShaderReadOnlyOptimal),
+ target: &*img.img,
+ families: None,
+ range: RESOURCES,
+ }),
+ );
+ buf.finish();
+
+ let mut fence = device.create_fence(false).context("Error creating fence")?;
+
+ {
+ let mut queue = queue_lock.write().map_err(|_| LockPoisoned::Queue)?;
+
+ queue.submit(
+ IntoIter::new([buf as &CommandBufferT]),
+ empty(),
+ empty(),
+ Some(&mut fence),
+ );
+ }
+
+ device
+ .wait_for_fence(&fence, std::u64::MAX)
+ .context("Error waiting for copy")?;
+
+ device.destroy_fence(fence);
+
+ staging_buffer.deactivate(device, staging_allocator);
+
+ Ok(img)
+ }
}
pub fn tex_size_info<T: LoadableImage>(img: &T, obcpa: hal::buffer::Offset) -> (usize, usize) {
@@ -338,12 +392,12 @@ pub fn tex_size_info<T: LoadableImage>(img: &T, obcpa: hal::buffer::Offset) -> (
}
fn create_image_view<T, I>(
- device: &mut Device,
+ device: &mut DeviceT,
allocator: &mut T,
format: Format,
usage: ImgUsage,
img: &I,
-) -> Result<(T::Block, Image)>
+) -> Result<(T::Block, ImageT)>
where
T: Allocator<back::Backend>,
I: LoadableImage,
@@ -358,10 +412,10 @@ where
format,
Tiling::Optimal,
usage,
+ SparseFlags::empty(),
ViewCapabilities::empty(),
)
}
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error creating image")?;
// Allocate memory
@@ -370,15 +424,83 @@ where
allocator.alloc(device, requirements.size, requirements.alignment)
}
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error allocating memory")?;
unsafe {
device
.bind_image_memory(&block.memory(), block.range().start, &mut image_ref)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error binding memory to image")?;
}
Ok((block, image_ref))
}
+
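+/// Stage `img_data` into a new staging buffer and create the image, view and
+/// sampler it will be copied into. The caller is responsible for recording
+/// and submitting the actual buffer-to-image copy.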
+unsafe fn load_image<I: LoadableImage>(
+ device: &mut DeviceT,
+ staging_allocator: &mut DynamicAllocator,
+ tex_allocator: &mut DynamicAllocator,
+ staging_memory_type: MemoryTypeId,
+ obcpa: u64,
+ img_data: I,
+) -> Result<(StagingBuffer, LoadedImage<DynamicBlock>)> {
+ // Calculate buffer size
+ let (row_size, total_size) = tex_size_info(&img_data, obcpa);
+
+ // Create staging buffer
+ let mut staging_buffer = StagingBuffer::new(
+ device,
+ staging_allocator,
+ total_size as u64,
+ staging_memory_type,
+ )
+ .context("Error creating staging buffer")?;
+
+ // Write to staging buffer
+ let mapped_memory = staging_buffer
+ .map_memory(device)
+ .context("Error mapping staged memory")?;
+
+ img_data.copy_into(mapped_memory, row_size);
+
+ staging_buffer.unmap_memory(device);
+
+ // Create image
+ let (img_mem, img) = create_image_view(
+ device,
+ tex_allocator,
+ FORMAT,
+ ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST,
+ &img_data,
+ )
+ .context("Error creating image")?;
+
+ // Create image view
+ let img_view = device
+ .create_image_view(
+ &img,
+ ViewKind::D2,
+ FORMAT,
+ Swizzle::NO,
+ ImgUsage::SAMPLED | ImgUsage::TRANSFER_DST,
+ RESOURCES,
+ )
+ .context("Error creating image view")?;
+
+ // Create sampler
+ let sampler = device
+ .create_sampler(&SamplerDesc::new(Filter::Nearest, WrapMode::Tile))
+ .context("Error creating sampler")?;
+
+ Ok((
+ staging_buffer,
+ LoadedImage {
+ mem: ManuallyDrop::new(img_mem),
+ img: ManuallyDrop::new(img),
+ img_view: ManuallyDrop::new(img_view),
+ sampler: ManuallyDrop::new(sampler),
+ row_size,
+ height: img_data.height(),
+ width: img_data.width(),
+ },
+ ))
+}
diff --git a/stockton-render/src/draw/texture/loader.rs b/stockton-render/src/draw/texture/loader.rs
index f505de5..a23d633 100644
--- a/stockton-render/src/draw/texture/loader.rs
+++ b/stockton-render/src/draw/texture/loader.rs
@@ -1,17 +1,17 @@
//! Manages the loading/unloading of textures
use super::{
- block::TexturesBlock,
+ block::{LoadedImage, TexturesBlock},
load::{QueuedLoad, TextureLoadError},
resolver::TextureResolver,
LoadableImage,
};
-use crate::{draw::utils::find_memory_type_id, types::*};
+use crate::{draw::utils::find_memory_type_id, error::LockPoisoned, types::*};
use std::{
collections::VecDeque,
marker::PhantomData,
- mem::ManuallyDrop,
+ mem::{drop, ManuallyDrop},
sync::{
mpsc::{Receiver, Sender},
Arc, RwLock,
@@ -23,7 +23,9 @@ use std::{
use anyhow::{Context, Result};
use arrayvec::ArrayVec;
use hal::{
- format::Format, memory::Properties as MemProps, prelude::*, queue::family::QueueFamilyId,
+ format::Format,
+ memory::{Properties as MemProps, SparseFlags},
+ queue::family::QueueFamilyId,
MemoryTypeId,
};
use log::*;
@@ -40,23 +42,20 @@ pub type BlockRef = usize;
/// Manages the loading/unloading of textures
/// This is expected to load the textures, then send the loaded blocks back
pub struct TextureLoader<T, R, I> {
- /// Handle to the device we're using
- pub(crate) device: Arc<RwLock<Device>>,
-
/// Blocks for which commands have been queued and are done loading once the fence is triggered.
pub(crate) commands_queued: ArrayVec<[QueuedLoad<DynamicBlock>; NUM_SIMULTANEOUS_CMDS]>,
/// The command buffers used and a fence to go with them
- pub(crate) buffers: VecDeque<(Fence, CommandBuffer)>,
+ pub(crate) buffers: VecDeque<(FenceT, CommandBufferT)>,
/// The command pool buffers were allocated from
- pub(crate) pool: ManuallyDrop<CommandPool>,
+ pub(crate) pool: ManuallyDrop<CommandPoolT>,
/// The GPU we're submitting to
- pub(crate) gpu: ManuallyDrop<Gpu>,
+ pub(crate) device: Arc<RwLock<DeviceT>>,
- /// The index of the command queue being used
- pub(crate) cmd_queue_idx: usize,
+ /// The command queue being used
+ pub(crate) queue: Arc<RwLock<QueueT>>,
/// The memory allocator being used for textures
pub(crate) tex_allocator: ManuallyDrop<DynamicAllocator>,
@@ -67,7 +66,7 @@ pub struct TextureLoader<T, R, I> {
/// Allocator for descriptor sets
pub(crate) descriptor_allocator: ManuallyDrop<DescriptorAllocator>,
- pub(crate) ds_layout: Arc<RwLock<DescriptorSetLayout>>,
+ pub(crate) ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
/// Type ID for staging memory
pub(crate) staging_memory_type: MemoryTypeId,
@@ -88,6 +87,9 @@ pub struct TextureLoader<T, R, I> {
/// The channel blocks are returned to.
pub(crate) return_channel: Sender<TexturesBlock<DynamicBlock>>,
+ /// A filler image for descriptors that aren't needed but still need to be written to
+ pub(crate) blank_image: ManuallyDrop<LoadedImage<DynamicBlock>>,
+
pub(crate) _li: PhantomData<I>,
}
@@ -121,18 +123,20 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
}
}
fn main(&mut self) -> Result<bool> {
- let mut device = self.device.write().unwrap();
-
+ let mut device = self
+ .device
+ .write()
+ .map_err(|_| LockPoisoned::Device)
+ .context("Error getting device lock")?;
// Check for blocks that are finished, then send them back
let mut i = 0;
while i < self.commands_queued.len() {
let signalled = unsafe { device.get_fence_status(&self.commands_queued[i].fence) }
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error checking fence status")?;
if signalled {
let (assets, mut staging_bufs, block) = self.commands_queued.remove(i).dissolve();
- debug!("Done loading texture block {:?}", block.id);
+ debug!("Load finished for texture block {:?}", block.id);
// Destroy staging buffers
while staging_bufs.len() > 0 {
@@ -155,11 +159,15 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
match to_load {
LoaderRequest::Load(to_load) => {
// Attempt to load given block
+ debug!("Attempting to queue load for texture block {:?}", to_load);
+
let result = unsafe { self.attempt_queue_load(to_load) };
match result {
Ok(queued_load) => self.commands_queued.push(queued_load),
Err(x) => match x.downcast_ref::<TextureLoadError>() {
- Some(TextureLoadError::NoResources) => {}
+ Some(TextureLoadError::NoResources) => {
+ debug!("No resources, trying again later");
+ }
_ => return Err(x).context("Error queuing texture load"),
},
}
@@ -172,29 +180,21 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
}
pub fn new(
- device_lock: Arc<RwLock<Device>>,
adapter: &Adapter,
+ device_lock: Arc<RwLock<DeviceT>>,
family: QueueFamilyId,
- gpu: Gpu,
- ds_layout: Arc<RwLock<DescriptorSetLayout>>,
+ queue_lock: Arc<RwLock<QueueT>>,
+ ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
request_channel: Receiver<LoaderRequest>,
return_channel: Sender<TexturesBlock<DynamicBlock>>,
texs: Arc<RwLock<T>>,
resolver: R,
) -> Result<Self> {
- let device = device_lock
+ let mut device = device_lock
.write()
.map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
-
- // Pool
- let mut pool = unsafe {
- use hal::pool::CommandPoolCreateFlags;
-
- device.create_command_pool(family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
- }
- .map_err::<HalErrorWrapper, _>(|e| e.into())
- .context("Error creating command pool")?;
+ let device_props = adapter.physical_device.properties();
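+        // Create a throwaway image with the same settings as real textures,
+        // purely to query which memory types can back them.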
let type_mask = unsafe {
use hal::image::{Kind, Tiling, Usage, ViewCapabilities};
@@ -214,9 +214,9 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
Format::Rgba8Srgb,
Tiling::Optimal,
Usage::SAMPLED,
+ SparseFlags::empty(),
ViewCapabilities::empty(),
)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error creating test image to get buffer settings")?;
let type_mask = device.get_image_requirements(&img).type_mask;
@@ -226,8 +226,10 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
type_mask
};
+ debug!("Using type mask {:?}", type_mask);
+
// Tex Allocator
- let tex_allocator = {
+ let mut tex_allocator = {
let props = MemProps::DEVICE_LOCAL;
DynamicAllocator::new(
@@ -239,10 +241,11 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
max_chunk_size: u64::pow(2, 63),
min_device_allocation: 4 * 32 * 32,
},
+ device_props.limits.non_coherent_atom_size as u64,
)
};
- let (staging_memory_type, staging_allocator) = {
+ let (staging_memory_type, mut staging_allocator) = {
let props = MemProps::CPU_VISIBLE | MemProps::COHERENT;
let t = find_memory_type_id(&adapter, type_mask, props)
.ok_or(TextureLoaderError::NoMemoryTypes)?;
@@ -256,20 +259,28 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
max_chunk_size: u64::pow(2, 63),
min_device_allocation: 4 * 32 * 32,
},
+ device_props.limits.non_coherent_atom_size as u64,
),
)
};
- let buffers = {
+ // Pool
+ let mut pool = unsafe {
+ use hal::pool::CommandPoolCreateFlags;
+
+ device.create_command_pool(family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
+ }
+ .context("Error creating command pool")?;
+
+ // Command buffers and fences
+ debug!("Creating resources...");
+ let mut buffers = {
let mut data = VecDeque::with_capacity(NUM_SIMULTANEOUS_CMDS);
for _ in 0..NUM_SIMULTANEOUS_CMDS {
unsafe {
data.push_back((
- device
- .create_fence(false)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
- .context("Error creating fence")?,
+ device.create_fence(false).context("Error creating fence")?,
pool.allocate_one(hal::command::Level::Primary),
));
};
@@ -278,21 +289,30 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
data
};
- let cmd_queue_idx = gpu
- .queue_groups
- .iter()
- .position(|x| x.family == family)
- .unwrap();
+ let optimal_buffer_copy_pitch_alignment =
+ device_props.limits.optimal_buffer_copy_pitch_alignment;
+
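+        // Upload the 1x1 transparent image now, while we still hold the device
+        // lock; it fills descriptor slots that have no real texture.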
+ let blank_image = unsafe {
+ Self::get_blank_image(
+ &mut device,
+ &mut buffers[0].1,
+ &queue_lock,
+ &mut staging_allocator,
+ &mut tex_allocator,
+ staging_memory_type,
+ optimal_buffer_copy_pitch_alignment,
+ )
+ }
+ .context("Error creating blank image")?;
- std::mem::drop(device);
+ drop(device);
Ok(TextureLoader {
- device: device_lock,
commands_queued: ArrayVec::new(),
buffers,
pool: ManuallyDrop::new(pool),
- gpu: ManuallyDrop::new(gpu),
- cmd_queue_idx,
+ device: device_lock,
+ queue: queue_lock,
ds_layout,
tex_allocator: ManuallyDrop::new(tex_allocator),
@@ -300,15 +320,13 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
descriptor_allocator: ManuallyDrop::new(DescriptorAllocator::new()),
staging_memory_type,
- optimal_buffer_copy_pitch_alignment: adapter
- .physical_device
- .limits()
- .optimal_buffer_copy_pitch_alignment,
+ optimal_buffer_copy_pitch_alignment,
request_channel,
return_channel,
textures: texs,
resolver,
+ blank_image: ManuallyDrop::new(blank_image),
_li: PhantomData::default(),
})
}
@@ -354,6 +372,9 @@ impl<T: HasTextures, R: TextureResolver<I>, I: LoadableImage> TextureLoader<T, R
sleep(Duration::from_secs(0));
}
+ // Destroy blank image
+ read(&*self.blank_image).deactivate(&mut device, &mut *self.tex_allocator);
+
// Destroy fences
let vec: Vec<_> = self.buffers.drain(..).collect();
diff --git a/stockton-render/src/draw/texture/repo.rs b/stockton-render/src/draw/texture/repo.rs
index 2316dc4..c37da11 100644
--- a/stockton-render/src/draw/texture/repo.rs
+++ b/stockton-render/src/draw/texture/repo.rs
@@ -6,12 +6,14 @@ use super::{
resolver::TextureResolver,
LoadableImage,
};
+use crate::error::LockPoisoned;
use crate::types::*;
use std::{
+ array::IntoIter,
collections::HashMap,
+ iter::empty,
marker::PhantomData,
- mem::drop,
mem::ManuallyDrop,
sync::{
mpsc::{channel, Receiver, Sender},
@@ -22,12 +24,10 @@ use std::{
use anyhow::{Context, Result};
use hal::{
- prelude::*,
- pso::{DescriptorSetLayoutBinding, DescriptorType, ShaderStageFlags},
- Features,
+ pso::{DescriptorSetLayoutBinding, DescriptorType, ImageDescriptorType, ShaderStageFlags},
+ queue::family::QueueFamilyId,
};
use log::debug;
-use thiserror::Error;
/// The number of textures in one 'block'
/// The textures of the loaded file are divided into blocks of this size.
@@ -36,7 +36,7 @@ pub const BLOCK_SIZE: usize = 8;
pub struct TextureRepo<'a> {
joiner: ManuallyDrop<JoinHandle<Result<TextureLoaderRemains>>>,
- ds_layout: Arc<RwLock<DescriptorSetLayout>>,
+ ds_layout: Arc<RwLock<DescriptorSetLayoutT>>,
req_send: Sender<LoaderRequest>,
resp_recv: Receiver<TexturesBlock<DynamicBlock>>,
blocks: HashMap<BlockRef, Option<TexturesBlock<DynamicBlock>>>,
@@ -44,55 +44,43 @@ pub struct TextureRepo<'a> {
_a: PhantomData<&'a ()>,
}
-#[derive(Error, Debug)]
-pub enum TextureRepoError {
- #[error("No suitable queue family")]
- NoQueueFamilies,
-
- #[error("Lock poisoned")]
- LockPoisoned,
-}
-
impl<'a> TextureRepo<'a> {
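+    /// Filter for queue families the loader can use: the family must support
+    /// transfer operations and expose at least NUM_SIMULTANEOUS_CMDS queues.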
+ pub fn queue_family_filter(family: &&QueueFamilyT) -> bool {
+ family.queue_type().supports_transfer() && family.max_queues() >= NUM_SIMULTANEOUS_CMDS
+ }
+
pub fn new<
T: 'static + HasTextures + Send + Sync,
R: 'static + TextureResolver<I> + Send + Sync,
I: 'static + LoadableImage + Send,
>(
- device_lock: Arc<RwLock<Device>>,
+ device_lock: Arc<RwLock<DeviceT>>,
+ family: QueueFamilyId,
+ queue: Arc<RwLock<QueueT>>,
adapter: &Adapter,
texs_lock: Arc<RwLock<T>>,
resolver: R,
) -> Result<Self> {
+ // Create Channels
let (req_send, req_recv) = channel();
let (resp_send, resp_recv) = channel();
- let family = adapter
- .queue_families
- .iter()
- .find(|family| {
- family.queue_type().supports_transfer()
- && family.max_queues() >= NUM_SIMULTANEOUS_CMDS
- })
- .ok_or(TextureRepoError::NoQueueFamilies)?;
-
- let gpu = unsafe {
- adapter
- .physical_device
- .open(&[(family, &[1.0])], Features::empty())?
- };
-
let device = device_lock
.write()
- .map_err(|_| TextureRepoError::LockPoisoned)
+ .map_err(|_| LockPoisoned::Device)
.context("Error getting device lock")?;
+ // Create descriptor set layout
let ds_lock = Arc::new(RwLock::new(
unsafe {
device.create_descriptor_set_layout(
- &[
+ IntoIter::new([
DescriptorSetLayoutBinding {
binding: 0,
- ty: DescriptorType::SampledImage,
+ ty: DescriptorType::Image {
+ ty: ImageDescriptorType::Sampled {
+ with_sampler: false,
+ },
+ },
count: BLOCK_SIZE,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
@@ -104,22 +92,23 @@ impl<'a> TextureRepo<'a> {
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
- ],
- &[],
+ ]),
+ empty(),
)
}
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error creating descriptor set layout")?,
));
+ debug!("Created descriptor set layout {:?}", ds_lock);
+
drop(device);
let joiner = {
let loader = TextureLoader::new(
- device_lock,
adapter,
- family.id(),
- gpu,
+ device_lock.clone(),
+ family,
+ queue,
ds_lock.clone(),
req_recv,
resp_send,
@@ -140,7 +129,7 @@ impl<'a> TextureRepo<'a> {
})
}
- pub fn get_ds_layout(&self) -> RwLockReadGuard<DescriptorSetLayout> {
+ pub fn get_ds_layout(&self) -> RwLockReadGuard<DescriptorSetLayoutT> {
self.ds_layout.read().unwrap()
}
@@ -162,7 +151,7 @@ impl<'a> TextureRepo<'a> {
Ok(())
}
- pub fn attempt_get_descriptor_set(&mut self, block_id: BlockRef) -> Option<&DescriptorSet> {
+ pub fn attempt_get_descriptor_set(&mut self, block_id: BlockRef) -> Option<&DescriptorSetT> {
self.blocks
.get(&block_id)
.and_then(|opt| opt.as_ref().map(|z| z.descriptor_set.raw()))
@@ -176,7 +165,7 @@ impl<'a> TextureRepo<'a> {
}
}
- pub fn deactivate(mut self, device_lock: &mut Arc<RwLock<Device>>) {
+ pub fn deactivate(mut self, device_lock: &mut Arc<RwLock<DeviceT>>) {
unsafe {
use std::ptr::read;
diff --git a/stockton-render/src/draw/texture/staging_buffer.rs b/stockton-render/src/draw/texture/staging_buffer.rs
index 4adc974..8d2ae17 100644
--- a/stockton-render/src/draw/texture/staging_buffer.rs
+++ b/stockton-render/src/draw/texture/staging_buffer.rs
@@ -1,13 +1,14 @@
+#![allow(mutable_transmutes)]
use crate::types::*;
use std::mem::ManuallyDrop;
use anyhow::{Context, Result};
-use hal::{device::MapError, prelude::*, MemoryTypeId};
+use hal::{device::MapError, memory::SparseFlags, MemoryTypeId};
use rendy_memory::{Allocator, Block};
pub struct StagingBuffer {
- pub buf: ManuallyDrop<Buffer>,
+ pub buf: ManuallyDrop<BufferT>,
pub mem: ManuallyDrop<DynamicBlock>,
}
@@ -15,24 +16,21 @@ impl StagingBuffer {
const USAGE: hal::buffer::Usage = hal::buffer::Usage::TRANSFER_SRC;
pub fn new(
- device: &mut Device,
+ device: &mut DeviceT,
alloc: &mut DynamicAllocator,
size: u64,
_memory_type_id: MemoryTypeId,
) -> Result<StagingBuffer> {
- let mut buffer = unsafe { device.create_buffer(size, Self::USAGE) }
- .map_err::<HalErrorWrapper, _>(|e| e.into())
+ let mut buffer = unsafe { device.create_buffer(size, Self::USAGE, SparseFlags::empty()) }
.context("Error creating buffer")?;
let requirements = unsafe { device.get_buffer_requirements(&buffer) };
let (memory, _) = alloc
.alloc(device, requirements.size, requirements.alignment)
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error allocating staging memory")?;
unsafe { device.bind_buffer_memory(memory.memory(), 0, &mut buffer) }
- .map_err::<HalErrorWrapper, _>(|e| e.into())
.context("Error binding staging memory to buffer")?;
Ok(StagingBuffer {
@@ -41,14 +39,15 @@ impl StagingBuffer {
})
}
- pub unsafe fn map_memory(&mut self, device: &mut Device) -> Result<*mut u8, MapError> {
- device.map_memory(self.mem.memory(), self.mem.range())
+ pub unsafe fn map_memory(&mut self, device: &mut DeviceT) -> Result<*mut u8, MapError> {
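+        // Map the whole block: the range passed to rendy-memory is relative to
+        // the block, so it runs from 0 to the block's length.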
+ let range = 0..(self.mem.range().end - self.mem.range().start);
+ Ok(self.mem.map(device, range)?.ptr().as_mut())
}
- pub unsafe fn unmap_memory(&mut self, device: &mut Device) {
- device.unmap_memory(self.mem.memory()); // TODO: What if the same Memory is mapped in multiple places?
+ pub unsafe fn unmap_memory(&mut self, device: &mut DeviceT) {
+ self.mem.unmap(device);
}
- pub fn deactivate(self, device: &mut Device, alloc: &mut DynamicAllocator) {
+ pub fn deactivate(self, device: &mut DeviceT, alloc: &mut DynamicAllocator) {
unsafe {
use std::ptr::read;
// Destroy buffer
diff --git a/stockton-render/src/draw/ui/pipeline.rs b/stockton-render/src/draw/ui/pipeline.rs
index c10d83f..757c978 100644
--- a/stockton-render/src/draw/ui/pipeline.rs
+++ b/stockton-render/src/draw/ui/pipeline.rs
@@ -10,12 +10,11 @@ const VERTEX_SOURCE: &str = include_str!("./data/stockton.vert");
const FRAGMENT_SOURCE: &str = include_str!("./data/stockton.frag");
use std::{
- borrow::Borrow,
+ array::IntoIter,
+ iter::once,
mem::{size_of, ManuallyDrop},
};
-use hal::prelude::*;
-
use crate::draw::target::SwapchainProperties;
use crate::error;
use crate::types::*;
@@ -23,32 +22,28 @@ use crate::types::*;
/// A complete 2D graphics pipeline and associated resources
pub struct UiPipeline {
/// Our main render pass
- pub(crate) renderpass: ManuallyDrop<RenderPass>,
+ pub(crate) renderpass: ManuallyDrop<RenderPassT>,
/// The layout of our main graphics pipeline
- pub(crate) pipeline_layout: ManuallyDrop<PipelineLayout>,
+ pub(crate) pipeline_layout: ManuallyDrop<PipelineLayoutT>,
/// Our main graphics pipeline
- pub(crate) pipeline: ManuallyDrop<GraphicsPipeline>,
+ pub(crate) pipeline: ManuallyDrop<GraphicsPipelineT>,
/// The vertex shader module
- pub(crate) vs_module: ManuallyDrop<ShaderModule>,
+ pub(crate) vs_module: ManuallyDrop<ShaderModuleT>,
/// The fragment shader module
- pub(crate) fs_module: ManuallyDrop<ShaderModule>,
+ pub(crate) fs_module: ManuallyDrop<ShaderModuleT>,
}
impl UiPipeline {
- pub fn new<T>(
- device: &mut Device,
+ pub fn new<'a, T: Iterator<Item = &'a DescriptorSetLayoutT>>(
+ device: &mut DeviceT,
extent: hal::image::Extent,
swapchain_properties: &SwapchainProperties,
set_layouts: T,
- ) -> Result<Self, error::CreationError>
- where
- T: IntoIterator + std::fmt::Debug,
- T::Item: Borrow<DescriptorSetLayout>,
- {
+ ) -> Result<Self, error::CreationError> {
use hal::format::Format;
use hal::pso::*;
@@ -81,7 +76,7 @@ impl UiPipeline {
let external_dependency = SubpassDependency {
flags: Dependencies::empty(),
- passes: SubpassRef::External..SubpassRef::Pass(0),
+ passes: None..Some(0),
stages: PipelineStage::COLOR_ATTACHMENT_OUTPUT
..(PipelineStage::COLOR_ATTACHMENT_OUTPUT
| PipelineStage::EARLY_FRAGMENT_TESTS),
@@ -90,7 +85,11 @@ impl UiPipeline {
};
unsafe {
- device.create_render_pass(&[img_attachment], &[subpass], &[external_dependency])
+ device.create_render_pass(
+ IntoIter::new([img_attachment]),
+ IntoIter::new([subpass]),
+ IntoIter::new([external_dependency]),
+ )
}
.map_err(|_| error::CreationError::OutOfMemoryError)?
};
@@ -152,28 +151,6 @@ impl UiPipeline {
},
);
- // Shader set
- let shaders = GraphicsShaderSet {
- vertex: vs_entry,
- fragment: Some(fs_entry),
- hull: None,
- domain: None,
- geometry: None,
- };
-
- // Vertex buffers
- let vertex_buffers: Vec<VertexBufferDesc> = vec![VertexBufferDesc {
- binding: 0,
- stride: ((size_of::<f32>() * 4) + (size_of::<u8>() * 4)) as u32,
- rate: VertexInputRate::Vertex,
- }];
-
- let attributes: Vec<AttributeDesc> = pipeline_vb_attributes!(0,
- size_of::<f32>() * 2; Rg32Sfloat,
- size_of::<f32>() * 2; Rg32Sfloat,
- size_of::<u8>() * 4; R32Uint
- );
-
// Rasterizer
let rasterizer = Rasterizer {
polygon_mode: PolygonMode::Fill,
@@ -182,6 +159,7 @@ impl UiPipeline {
depth_clamping: false,
depth_bias: None,
conservative: true,
+ line_width: State::Static(1.0),
};
// Depth stencil
@@ -191,10 +169,9 @@ impl UiPipeline {
stencil: None,
};
- log::debug!("ui set layouts: {:?}", set_layouts);
// Pipeline layout
let layout = unsafe {
- device.create_pipeline_layout(set_layouts, &[(ShaderStageFlags::VERTEX, 0..8)])
+ device.create_pipeline_layout(set_layouts, once((ShaderStageFlags::VERTEX, 0..8)))
}
.map_err(|_| error::CreationError::OutOfMemoryError)?;
@@ -227,18 +204,55 @@ impl UiPipeline {
depth: (0.0..1.0),
}),
scissor: Some(extent.rect()),
- blend_color: None,
+ blend_constants: None,
depth_bounds: None,
};
- // Input assembler
- let input_assembler = InputAssemblerDesc::new(Primitive::TriangleList);
+ // Primitive assembler
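+        // Vertex layout: position (2 x f32), UV (2 x f32), then an RGBA colour
+        // packed into one u32; the 24-byte stride leaves 4 bytes of padding.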
+ let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
+ buffers: &[VertexBufferDesc {
+ binding: 0,
+ stride: (size_of::<f32>() * 6) as u32,
+ rate: VertexInputRate::Vertex,
+ }],
+ attributes: &[
+ AttributeDesc {
+ location: 0,
+ binding: 0,
+ element: Element {
+ format: Format::Rg32Sfloat,
+ offset: 0,
+ },
+ },
+ AttributeDesc {
+ location: 1,
+ binding: 0,
+ element: Element {
+ format: Format::Rg32Sfloat,
+ offset: (size_of::<f32>() * 2) as u32,
+ },
+ },
+ AttributeDesc {
+ location: 2,
+ binding: 0,
+ element: Element {
+ format: Format::R32Uint,
+ offset: (size_of::<f32>() * 4) as u32,
+ },
+ },
+ ],
+ input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
+ vertex: vs_entry,
+ tessellation: None,
+ geometry: None,
+ };
// Pipeline description
let pipeline_desc = GraphicsPipelineDesc {
- shaders,
+ label: Some("UI Pipeline"),
+ primitive_assembler,
rasterizer,
- vertex_buffers,
+ fragment: Some(fs_entry),
blender,
depth_stencil,
multisampling: None,
@@ -247,8 +261,6 @@ impl UiPipeline {
subpass,
flags: PipelineCreationFlags::empty(),
parent: BasePipeline::None,
- input_assembler,
- attributes,
};
// Pipeline
@@ -265,7 +277,7 @@ impl UiPipeline {
}
/// Deactivate vulkan resources. Use before dropping
- pub fn deactivate(self, device: &mut Device) {
+ pub fn deactivate(self, device: &mut DeviceT) {
unsafe {
use core::ptr::read;
diff --git a/stockton-render/src/draw/ui/render.rs b/stockton-render/src/draw/ui/render.rs
index 757b3a2..62a13bd 100644
--- a/stockton-render/src/draw/ui/render.rs
+++ b/stockton-render/src/draw/ui/render.rs
@@ -1,18 +1,16 @@
use crate::draw::texture::TextureRepo;
-use arrayvec::ArrayVec;
-use hal::prelude::*;
use hal::pso::ShaderStageFlags;
use super::UiPoint;
use crate::draw::draw_buffers::DrawBuffers;
use crate::types::*;
use crate::UiState;
-use std::convert::TryInto;
+use std::{array::IntoIter, convert::TryInto, iter::empty};
use stockton_types::Vector2;
pub fn do_render(
- cmd_buffer: &mut CommandBuffer,
- pipeline_layout: &PipelineLayout,
+ cmd_buffer: &mut CommandBufferT,
+ pipeline_layout: &PipelineLayoutT,
draw_buffers: &mut DrawBuffers<UiPoint>,
tex_repo: &mut TextureRepo,
ui: &mut UiState,
@@ -49,11 +47,13 @@ pub fn do_render(
// TODO: *Properly* deal with textures
if let Some(ds) = tex_repo.attempt_get_descriptor_set(0) {
- let mut descriptor_sets: ArrayVec<[_; 1]> = ArrayVec::new();
- descriptor_sets.push(ds);
-
unsafe {
- cmd_buffer.bind_graphics_descriptor_sets(pipeline_layout, 0, descriptor_sets, &[]);
+ cmd_buffer.bind_graphics_descriptor_sets(
+ pipeline_layout,
+ 0,
+ IntoIter::new([ds]),
+ empty(),
+ );
// Call draw
cmd_buffer.draw_indexed(0..tris.indices.len() as u32, 0, 0..1);
}
diff --git a/stockton-render/src/draw/ui/texture.rs b/stockton-render/src/draw/ui/texture.rs
index 7cf207f..0ec4873 100755
--- a/stockton-render/src/draw/ui/texture.rs
+++ b/stockton-render/src/draw/ui/texture.rs
@@ -46,11 +46,11 @@ impl LoadableImage for &Texture {
pub fn ensure_textures(
_tex_repo: &mut TextureRepo,
ui: &mut UiState,
- _device: &mut Device,
+ _device: &mut DeviceT,
_adapter: &mut Adapter,
_allocator: &mut DynamicAllocator,
- _command_queue: &mut CommandQueue,
- _command_pool: &mut CommandPool,
+ _command_queue: &mut QueueT,
+ _command_pool: &mut CommandPoolT,
) {
let tex = ui.ctx.texture();
diff --git a/stockton-render/src/draw/utils.rs b/stockton-render/src/draw/utils.rs
index df62bb8..2ab984b 100644
--- a/stockton-render/src/draw/utils.rs
+++ b/stockton-render/src/draw/utils.rs
@@ -3,7 +3,7 @@ use hal::{memory::Properties as MemProperties, prelude::*, MemoryTypeId};
pub fn find_memory_type_id(
adapter: &Adapter,
- type_mask: u64,
+ type_mask: u32,
props: MemProperties,
) -> Option<MemoryTypeId> {
adapter
diff --git a/stockton-render/src/error.rs b/stockton-render/src/error.rs
index 3441bb2..7c9abd4 100644
--- a/stockton-render/src/error.rs
+++ b/stockton-render/src/error.rs
@@ -1,6 +1,7 @@
//! Error types
use super::draw::target::TargetChainCreationError;
+use thiserror::Error;
/// An error encountered creating a rendering context.
#[derive(Debug)]
@@ -23,8 +24,8 @@ pub enum CreationError {
BufferError(hal::buffer::CreationError),
BufferNoMemory,
- SwapchainError(hal::window::CreationError),
- ImageViewError(hal::image::ViewError),
+ SwapchainError,
+ ImageViewError,
BadDataError,
}
@@ -34,3 +35,15 @@ pub enum CreationError {
/// You'll likely need to exit or create a new context.
#[derive(Debug, Clone)]
pub enum FrameError {}
+
+#[derive(Error, Debug)]
+pub enum LockPoisoned {
+ #[error("Device lock poisoned")]
+ Device,
+
+ #[error("Map lock poisoned")]
+ Map,
+
+ #[error("Queue lock poisoned")]
+ Queue,
+}
diff --git a/stockton-render/src/types.rs b/stockton-render/src/types.rs
index 4a79602..797ced9 100644
--- a/stockton-render/src/types.rs
+++ b/stockton-render/src/types.rs
@@ -1,28 +1,28 @@
//! Convenience module to reference types that are stored in the backend's enum
-use thiserror::Error;
-
-pub type Device = <back::Backend as hal::Backend>::Device;
-pub type Gpu = hal::adapter::Gpu<back::Backend>;
-pub type Buffer = <back::Backend as hal::Backend>::Buffer;
-pub type Memory = <back::Backend as hal::Backend>::Memory;
-pub type Swapchain = <back::Backend as hal::Backend>::Swapchain;
-pub type Surface = <back::Backend as hal::Backend>::Surface;
-pub type Semaphore = <back::Backend as hal::Backend>::Semaphore;
-pub type Fence = <back::Backend as hal::Backend>::Fence;
-pub type CommandPool = <back::Backend as hal::Backend>::CommandPool;
-pub type CommandBuffer = <back::Backend as hal::Backend>::CommandBuffer;
-pub type CommandQueue = <back::Backend as hal::Backend>::CommandQueue;
-pub type DescriptorSetLayout = <back::Backend as hal::Backend>::DescriptorSetLayout;
-pub type DescriptorSet = <back::Backend as hal::Backend>::DescriptorSet;
-pub type PipelineLayout = <back::Backend as hal::Backend>::PipelineLayout;
-pub type GraphicsPipeline = <back::Backend as hal::Backend>::GraphicsPipeline;
-pub type ShaderModule = <back::Backend as hal::Backend>::ShaderModule;
-pub type Sampler = <back::Backend as hal::Backend>::Sampler;
-pub type Image = <back::Backend as hal::Backend>::Image;
-pub type ImageView = <back::Backend as hal::Backend>::ImageView;
-pub type Framebuffer = <back::Backend as hal::Backend>::Framebuffer;
-pub type RenderPass = <back::Backend as hal::Backend>::RenderPass;
+pub use hal::prelude::*;
+
+pub type InstanceT = <back::Backend as hal::Backend>::Instance;
+pub type DeviceT = <back::Backend as hal::Backend>::Device;
+pub type BufferT = <back::Backend as hal::Backend>::Buffer;
+pub type MemoryT = <back::Backend as hal::Backend>::Memory;
+pub type SurfaceT = <back::Backend as hal::Backend>::Surface;
+pub type SemaphoreT = <back::Backend as hal::Backend>::Semaphore;
+pub type FenceT = <back::Backend as hal::Backend>::Fence;
+pub type CommandPoolT = <back::Backend as hal::Backend>::CommandPool;
+pub type CommandBufferT = <back::Backend as hal::Backend>::CommandBuffer;
+pub type QueueT = <back::Backend as hal::Backend>::Queue;
+pub type QueueFamilyT = <back::Backend as hal::Backend>::QueueFamily;
+pub type DescriptorSetLayoutT = <back::Backend as hal::Backend>::DescriptorSetLayout;
+pub type DescriptorSetT = <back::Backend as hal::Backend>::DescriptorSet;
+pub type PipelineLayoutT = <back::Backend as hal::Backend>::PipelineLayout;
+pub type GraphicsPipelineT = <back::Backend as hal::Backend>::GraphicsPipeline;
+pub type ShaderModuleT = <back::Backend as hal::Backend>::ShaderModule;
+pub type SamplerT = <back::Backend as hal::Backend>::Sampler;
+pub type ImageT = <back::Backend as hal::Backend>::Image;
+pub type ImageViewT = <back::Backend as hal::Backend>::ImageView;
+pub type FramebufferT = <back::Backend as hal::Backend>::Framebuffer;
+pub type RenderPassT = <back::Backend as hal::Backend>::RenderPass;
pub type Adapter = hal::adapter::Adapter<back::Backend>;
pub type QueueGroup = hal::queue::QueueGroup<back::Backend>;
@@ -32,45 +32,3 @@ pub type DynamicAllocator = rendy_memory::DynamicAllocator<back::Backend>;
pub type DynamicBlock = rendy_memory::DynamicBlock<back::Backend>;
pub type RDescriptorSet = rendy_descriptor::DescriptorSet<back::Backend>;
-
-#[derive(Error, Debug)]
-pub enum LockPoisoned {
- #[error("Device lock poisoned")]
- Device,
-
- #[error("Map lock poisoned")]
- Map,
-
- #[error("Other lock poisoned")]
- Other,
-}
-
-#[derive(Error, Debug)]
-pub enum HalErrorWrapper {
- #[error("Device Creation Error: {0}")]
- DeviceCreationError(#[from] hal::device::CreationError),
-
- #[error("Buffer Creation Error: {0}")]
- BufferCreationError(#[from] hal::buffer::CreationError),
-
- #[error("Image Creation Error: {0}")]
- ImageCreationError(#[from] hal::image::CreationError),
-
- #[error("View Error: {0}")]
- ImageViewError(#[from] hal::image::ViewError),
-
- #[error("Out of memory on {0}")]
- OutOfMemory(#[from] hal::device::OutOfMemory),
-
- #[error("Device Lost: {0}")]
- DeviceLost(#[from] hal::device::DeviceLost),
-
- #[error("Allocation Error: {0}")]
- AllocationError(#[from] hal::device::AllocationError),
-
- #[error("Bind Error: {0}")]
- BindError(#[from] hal::device::BindError),
-
- #[error("Map Error: {0}")]
- MapError(#[from] hal::device::MapError),
-}