path: root/stockton-render/src
Diffstat (limited to 'stockton-render/src')
-rw-r--r--  stockton-render/src/draw/buffer.rs     | 295
-rw-r--r--  stockton-render/src/draw/context.rs    |  71
-rw-r--r--  stockton-render/src/draw/mod.rs        |   3
-rw-r--r--  stockton-render/src/draw/vertexlump.rs | 239
-rw-r--r--  stockton-render/src/error.rs           |   4
-rw-r--r--  stockton-render/src/lib.rs             |   2
6 files changed, 349 insertions, 265 deletions
diff --git a/stockton-render/src/draw/buffer.rs b/stockton-render/src/draw/buffer.rs
new file mode 100644
index 0000000..d29f857
--- /dev/null
+++ b/stockton-render/src/draw/buffer.rs
@@ -0,0 +1,295 @@
+// Copyright (C) 2019 Oscar Shrimpton
+
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the Free
+// Software Foundation, either version 3 of the License, or (at your option)
+// any later version.
+
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+// more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::marker::PhantomData;
+use std::ops::{Index, IndexMut, Range};
+use std::convert::TryInto;
+use core::mem::{ManuallyDrop, size_of};
+use hal::memory::{Properties, Requirements, Segment};
+use hal::buffer::Usage;
+use hal::adapter::{Adapter, MemoryType, PhysicalDevice};
+use hal::device::Device;
+use hal::{MemoryTypeId, VertexCount, InstanceCount};
+use hal::Backend;
+use crate::error::CreationError;
+use super::RenderingContext;
+
+// TODO: Proper sizing of buffers
+const BUF_SIZE: u64 = 32;
+
+fn create_buffer(device: &mut <back::Backend as hal::Backend>::Device,
+ adapter: &Adapter<back::Backend>,
+ usage: Usage,
+ properties: Properties) -> Result<(<back::Backend as hal::Backend>::Buffer, <back::Backend as hal::Backend>::Memory), CreationError> {
+ let mut buffer = unsafe { device
+ .create_buffer(BUF_SIZE, usage) }
+ .map_err(|e| CreationError::BufferError (e))?;
+
+ let requirements = unsafe { device.get_buffer_requirements(&buffer) };
+ let memory_type_id = adapter.physical_device
+ .memory_properties().memory_types
+ .iter().enumerate()
+ .find(|&(id, memory_type)| {
+ requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(properties)
+ })
+ .map(|(id, _)| MemoryTypeId(id))
+ .ok_or(CreationError::BufferNoMemory)?;
+
+ let memory = unsafe {device
+ .allocate_memory(memory_type_id, requirements.size) }
+ .map_err(|_| CreationError::OutOfMemoryError)?;
+
+ unsafe { device
+ .bind_buffer_memory(&memory, 0, &mut buffer) }
+ .map_err(|_| CreationError::BufferNoMemory)?;
+
+ Ok((buffer, memory))
+}
+
+trait ModifiableBuffer: IndexMut<usize> {
+ fn commit<'a>(&'a self) -> &'a <back::Backend as hal::Backend>::Buffer;
+}
+
+pub struct StagedBuffer<'a> {
+ staged_buffer: ManuallyDrop<<back::Backend as hal::Backend>::Buffer>,
+ staged_memory: ManuallyDrop<<back::Backend as hal::Backend>::Memory>,
+ buffer: ManuallyDrop<<back::Backend as hal::Backend>::Buffer>,
+ memory: ManuallyDrop<<back::Backend as hal::Backend>::Memory>,
+ mapped_staged: &'a mut [f32],
+ staged_is_dirty: bool
+}
+
+
+impl<'a> StagedBuffer<'a> {
+ pub(crate) fn new(device: &mut <back::Backend as hal::Backend>::Device, adapter: &Adapter<back::Backend>, usage: Usage) -> Result<Self, CreationError> {
+
+ let (staged_buffer, staged_memory) = create_buffer(device, adapter, Usage::TRANSFER_SRC, Properties::CPU_VISIBLE)?;
+ let (buffer, memory) = create_buffer(device, adapter, Usage::TRANSFER_DST | usage, Properties::DEVICE_LOCAL)?;
+
+ // Map the staging memory and get a slice to it
+ let raw_ptr = unsafe {
+ device.map_memory(&staged_memory, Segment::ALL).unwrap() // TODO
+ };
+ let slice_size: usize = (BUF_SIZE / size_of::<f32>() as u64).try_into().unwrap(); // size in f32s
+ let mapped_staged: &'a mut [f32] = unsafe { std::slice::from_raw_parts_mut(raw_ptr as *mut f32, slice_size) };
+
+ Ok(StagedBuffer {
+ staged_buffer: ManuallyDrop::new(staged_buffer),
+ staged_memory: ManuallyDrop::new(staged_memory),
+ buffer: ManuallyDrop::new(buffer),
+ memory: ManuallyDrop::new(memory),
+ mapped_staged,
+ staged_is_dirty: false
+ })
+ }
+}
+
+impl<'a> Index<usize> for StagedBuffer<'a> {
+ type Output = f32;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.mapped_staged[index]
+ }
+}
+
+impl<'a> IndexMut<usize> for StagedBuffer<'a> {
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ self.staged_is_dirty = true;
+ &mut self.mapped_staged[index]
+ }
+}
+
+// trait VertexLump {
+// pub fn new(device: &mut <back::Backend as hal::Backend>::Device, adapter: &Adapter<back::Backend>) -> Result<Self, CreationError> {
+// }
+
+// pub(crate) struct VertexLump<T: Into<X>, X: Pod> {
+// pub (crate) buffer: ManuallyDrop<<back::Backend as hal::Backend>::Buffer>,
+// memory: ManuallyDrop<<back::Backend as hal::Backend>::Memory>,
+// requirements: Requirements,
+
+// unit_size_bytes: u64,
+// unit_size_verts: u64,
+// batch_size: u64,
+
+// num_batches: usize,
+
+
+// /// An instance is active if it has been assigned to
+// pub active_instances: Range<InstanceCount>,
+// pub active_verts: Range<VertexCount>,
+
+// active: bool,
+
+// _t: PhantomData<T>,
+// _x: PhantomData<X>
+// }
+
+// const BATCH_SIZE: u64 = 3;
+
+// impl<T: Into<X>, X: Pod> VertexLump<T, X> {
+// pub fn new(device: &mut <back::Backend as hal::Backend>::Device, adapter: &Adapter<back::Backend>) -> Result<VertexLump<T, X>, CreationError> {
+// let unit_size_bytes = size_of::<X>() as u64;
+// let unit_size_verts = unit_size_bytes / size_of::<f32>() as u64;
+
+// let mut buffer = unsafe { device
+// .create_buffer(BATCH_SIZE * unit_size_bytes, Usage::VERTEX) }
+
+// .map_err(|e| CreationError::BufferError (e))?;
+
+// let requirements = unsafe { device.get_buffer_requirements(&buffer) };
+// let memory_type_id = adapter.physical_device
+// .memory_properties().memory_types
+// .iter().enumerate()
+// .find(|&(id, memory_type)| {
+// requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
+// })
+// .map(|(id, _)| MemoryTypeId(id))
+// .ok_or(CreationError::BufferNoMemory)?;
+
+// let memory = unsafe {device
+// .allocate_memory(memory_type_id, requirements.size) }
+// .map_err(|_| CreationError::OutOfMemoryError)?;
+
+// unsafe { device
+// .bind_buffer_memory(&memory, 0, &mut buffer) }
+// .map_err(|_| CreationError::BufferNoMemory)?;
+
+// Ok(VertexLump {
+// buffer: ManuallyDrop::new(buffer),
+// memory: ManuallyDrop::new(memory),
+// requirements,
+// active_verts: 0..0,
+// active_instances: 0..0,
+// num_batches: 1,
+// unit_size_bytes,
+// unit_size_verts,
+// batch_size: BATCH_SIZE, // TODO
+// active: true,
+// _t: PhantomData,
+// _x: PhantomData
+// })
+// }
+
+// pub fn set_active_instances(&mut self, range: Range<InstanceCount>) {
+// let count: u64 = (range.end - range.start).into();
+// let size_verts: u32 = (count * self.unit_size_verts).try_into().unwrap();
+// self.active_verts = range.start * size_verts..range.end * size_verts;
+// self.active_instances = range;
+// }
+
+// pub fn add(&mut self, tri: T, ctx: &mut RenderingContext) -> Result<(), ()> {
+
+// // figure out where to put it
+// let idx: usize = (self.active_instances.end).try_into().unwrap();
+// let batch_size: usize = self.batch_size.try_into().unwrap();
+// let max_size: usize = self.num_batches * batch_size;
+
+// // make sure correct size
+// if idx >= max_size {
+// self.num_batches += 1;
+
+// debug!("Reallocating Vertex buffer to {} batches ({} instances)", self.num_batches, self.num_batches as u64 * self.batch_size);
+// // get new buffer
+// let (new_buffer, new_requirements, new_memory) = {
+// let mut buffer = ManuallyDrop::new(unsafe { ctx.device
+// .create_buffer(self.batch_size * self.unit_size_bytes * self.num_batches as u64, Usage::VERTEX) }
+// .map_err(|_| ())?
+// );
+// let requirements = unsafe { ctx.device.get_buffer_requirements(&buffer) };
+
+// let memory_type_id = ctx.adapter.physical_device
+// .memory_properties().memory_types
+// .iter().enumerate()
+// .find(|&(id, memory_type)| {
+// requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
+// })
+// .map(|(id, _)| MemoryTypeId(id))
+// .ok_or(())?;
+
+// let memory = ManuallyDrop::new(unsafe { ctx.device
+// .allocate_memory(memory_type_id, requirements.size) }
+// .map_err(|_| ())?);
+
+// unsafe { ctx.device
+// .bind_buffer_memory(&memory, 0, &mut buffer) }
+// .map_err(|_| ())?;
+
+// (buffer, requirements, memory)
+// };
+
+// // copy vertices
+// unsafe {
+// let copy_range = 0..self.requirements.size;
+
+// trace!("Copying {:?} from old buffer to new buffer", copy_range);
+
+// let reader = ctx.device.acquire_mapping_reader::<u8>(&*(self.memory), copy_range.clone())
+// .map_err(|_| ())?;
+// let mut writer = ctx.device.acquire_mapping_writer::<u8>(&new_memory, copy_range.clone())
+// .map_err(|_| ())?;
+
+// let copy_range: Range<usize> = 0..self.requirements.size.try_into().unwrap();
+// writer[copy_range.clone()].copy_from_slice(&reader[copy_range.clone()]);
+
+// ctx.device.release_mapping_reader(reader);
+// ctx.device.release_mapping_writer(writer).map_err(|_| ())?;
+// };
+
+// // destroy old buffer
+// self.deactivate(ctx);
+
+// // use new one
+// self.buffer = new_buffer;
+// self.requirements = new_requirements;
+// self.memory = new_memory;
+// self.active = true;
+
+// }
+
+// {
+// // acquire writer
+// let mut writer = self.writer(ctx)?;
+
+// // write to it
+// writer[idx] = tri.into();
+// }
+
+// // activate new triangle
+// let new_range = self.active_instances.start..self.active_instances.end + 1;
+// self.set_active_instances(new_range);
+
+// Ok(())
+// }
+
+// pub(crate) fn writer<'a>(&'a mut self, ctx: &'a mut RenderingContext) -> Result<VertexWriter<'a, X>, ()> {
+// let mapping_writer = unsafe { ctx.device
+// .acquire_mapping_writer(&*(self.memory), 0..self.requirements.size)
+// .map_err(|_| ())? };
+
+// Ok(VertexWriter {
+// mapping_writer: ManuallyDrop::new(mapping_writer),
+// ctx
+// })
+// }
+
+// pub(crate) fn deactivate(&mut self, ctx: &mut RenderingContext) {
+// unsafe { ctx.device.free_memory(ManuallyDrop::take(&mut self.memory)) };
+// unsafe { ctx.device.destroy_buffer(ManuallyDrop::take(&mut self.buffer)) };
+// self.active = false;
+// }
+// }
+
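The `ModifiableBuffer` trait in buffer.rs declares `commit`, but this commit does not yet implement it for `StagedBuffer`. For orientation only, here is a minimal sketch of the staging-to-device copy that `commit` implies, written against a recent gfx-hal API (0.5/0.6-era method names); the extra `device`/`command_pool`/`queue` parameters, the throwaway fence, and the blocking wait are assumptions for illustration, not code from this repository.

```rust
// Sketch only: when the CPU-visible staging copy has been written to,
// record and submit a transfer into the DEVICE_LOCAL buffer, then hand
// the device-local buffer back for use in draw calls.
impl<'a> StagedBuffer<'a> {
    fn commit_with(&mut self,
        device: &mut <back::Backend as hal::Backend>::Device,
        command_pool: &mut <back::Backend as hal::Backend>::CommandPool,
        queue: &mut <back::Backend as hal::Backend>::CommandQueue,
    ) -> &<back::Backend as hal::Backend>::Buffer {
        use hal::command::{BufferCopy, CommandBuffer, CommandBufferFlags, Level};
        use hal::pool::CommandPool as _;
        use hal::queue::CommandQueue as _;

        if self.staged_is_dirty {
            unsafe {
                // Record a one-shot transfer command buffer.
                let mut cmd_buffer = command_pool.allocate_one(Level::Primary);
                cmd_buffer.begin_primary(CommandBufferFlags::ONE_TIME_SUBMIT);
                cmd_buffer.copy_buffer(&*self.staged_buffer, &*self.buffer, &[BufferCopy {
                    src: 0,
                    dst: 0,
                    size: BUF_SIZE,
                }]);
                cmd_buffer.finish();

                // Simplest possible synchronisation: submit and block on a fence.
                let fence = device.create_fence(false).unwrap();
                queue.submit_without_semaphores(std::iter::once(&cmd_buffer), Some(&fence));
                device.wait_for_fence(&fence, core::u64::MAX).unwrap();
                device.destroy_fence(fence);
                command_pool.free(std::iter::once(cmd_buffer));
            }
            self.staged_is_dirty = false;
        }

        &*self.buffer
    }
}
```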
diff --git a/stockton-render/src/draw/context.rs b/stockton-render/src/draw/context.rs
index bc27395..53f92cf 100644
--- a/stockton-render/src/draw/context.rs
+++ b/stockton-render/src/draw/context.rs
@@ -16,7 +16,6 @@
//! Deals with all the Vulkan/HAL details.
use crate::error as error;
use crate::error::{CreationError, FrameError};
-use super::VertexLump;
use std::mem::{ManuallyDrop, size_of};
use std::convert::TryInto;
@@ -28,6 +27,9 @@ use arrayvec::ArrayVec;
use hal::*;
use hal::device::Device;
use hal::format::{AsFormat, Rgba8Srgb as ColorFormat, Format, ChannelType};
+use hal::pool::CommandPool;
+use hal::queue::{QueueGroup, Submission};
+use hal::window::SwapchainConfig;
use hal::Instance as InstanceTrait;
@@ -35,6 +37,9 @@ use hal::Instance as InstanceTrait;
use back::glutin as glutin;
use stockton_types::Vector2;
+use super::buffer::StagedBuffer;
+
+type ModifiableBuffer<'a> = StagedBuffer<'a>;
const ENTRY_NAME: &str = "main";
const COLOR_RANGE: image::SubresourceRange = image::SubresourceRange {
@@ -74,7 +79,7 @@ type Instance = ();
/// Contains all the hal related stuff.
/// In the end, this takes some 3D points and puts it on the screen.
// TODO: Settings for clear colour, buffer sizes, etc
-pub struct RenderingContext {
+pub struct RenderingContext<'a> {
pub events_loop: winit::EventsLoop,
surface: <back::Backend as hal::Backend>::Surface,
@@ -97,20 +102,20 @@ pub struct RenderingContext {
present_complete: Vec<<back::Backend as hal::Backend>::Fence>,
frames_in_flight: usize,
- cmd_pools: Vec<ManuallyDrop<CommandPool<back::Backend, Graphics>>>,
- cmd_buffers: Vec<command::CommandBuffer<back::Backend, Graphics, command::MultiShot>>,
- queue_group: QueueGroup<back::Backend, Graphics>,
+ cmd_pools: Vec<ManuallyDrop<<back::Backend as hal::Backend>::CommandPool>>,
+ cmd_buffers: Vec<<back::Backend as hal::Backend>::CommandBuffer>,
+ queue_group: QueueGroup<back::Backend>,
- map_verts: VertexLump<Tri2, [f32; 15]>,
+ vert_buffer: ModifiableBuffer<'a>,
+ index_buffer: ModifiableBuffer<'a>,
descriptor_set_layouts: <back::Backend as hal::Backend>::DescriptorSetLayout,
pipeline_layout: ManuallyDrop<<back::Backend as hal::Backend>::PipelineLayout>,
pipeline: ManuallyDrop<<back::Backend as hal::Backend>::GraphicsPipeline>,
pub (crate) adapter: adapter::Adapter<back::Backend>
-
}
-impl RenderingContext {
+impl<'a> RenderingContext<'a> {
/// Create a new RenderingContext for the given window.
pub fn new() -> Result<Self, CreationError> {
let events_loop = EventsLoop::new();
@@ -148,12 +153,30 @@ impl RenderingContext {
let mut adapter = adapters.remove(0);
// Device & Queue group
- let (mut device, queue_group) = adapter
- .open_with::<_, Graphics>(1, |family| surface.supports_queue_family(family))
- .map_err(|e| CreationError::DeviceError (e))?;
+ let (mut device, queue_group) = {
+ // TODO
+ let family = adapter
+ .queue_families
+ .iter()
+ .find(|family| {
+ surface.supports_queue_family(family) && family.queue_type().supports_graphics()
+ })
+ .unwrap();
+
+ let mut gpu = unsafe {
+ adapter
+ .physical_device
+ .open(&[(family, &[1.0])], hal::Features::empty())
+ .unwrap()
+ };
+
+ (gpu.device, gpu.queue_groups.pop().unwrap())
+ };
// Swapchain stuff
let (format, viewport, extent, swapchain, backbuffer) = {
+ use hal::window::{PresentMode, CompositeAlphaMode};
+
let (caps, formats, present_modes) = surface.compatibility(&mut adapter.physical_device);
let format = formats.map_or(Format::Rgba8Srgb, |formats| {
@@ -164,15 +187,14 @@ impl RenderingContext {
});
let present_mode = {
- use hal::window::PresentMode::*;
- [Mailbox, Fifo, Relaxed, Immediate]
+ [PresentMode::Mailbox, PresentMode::Fifo, PresentMode::Relaxed, PresentMode::Immediate]
.iter()
.cloned()
.find(|pm| present_modes.contains(pm))
.ok_or(CreationError::BadSurface)?
};
let composite_alpha = {
- [CompositeAlpha::OPAQUE, CompositeAlpha::INHERIT, CompositeAlpha::PREMULTIPLIED, CompositeAlpha::POSTMULTIPLIED]
+ [CompositeAlphaMode::OPAQUE, CompositeAlphaMode::INHERIT, CompositeAlphaMode::PREMULTIPLIED, CompositeAlphaMode::POSTMULTIPLIED]
.iter()
.cloned()
.find(|ca| caps.composite_alpha.contains(*ca))
@@ -243,7 +265,7 @@ impl RenderingContext {
};
let dependency = SubpassDependency {
- passes: SubpassRef::External..SubpassRef::Pass(0),
+ passes: None..Some(0),
stages: PipelineStage::COLOR_ATTACHMENT_OUTPUT..PipelineStage::COLOR_ATTACHMENT_OUTPUT,
accesses: Access::empty()
..(Access::COLOR_ATTACHMENT_READ | Access::COLOR_ATTACHMENT_WRITE)
@@ -259,8 +281,14 @@ impl RenderingContext {
main_pass: &renderpass
};
- // Vertex buffer
- let map_verts = VertexLump::new(&mut device, &adapter)?;
+ // Vertex and index buffers
+ let (vert_buffer, index_buffer) = {
+ use hal::buffer::Usage;
+ (
+ ModifiableBuffer::new(&mut device, &adapter, Usage::VERTEX)?,
+ ModifiableBuffer::new(&mut device, &adapter, Usage::INDEX)?
+ )
+ };
// Command Pools, Buffers, imageviews, framebuffers & Sync objects
let frames_in_flight = backbuffer.len();
@@ -278,7 +306,7 @@ impl RenderingContext {
device.create_command_pool_typed(&queue_group, pool::CommandPoolCreateFlags::empty())
}.map_err(|_| CreationError::OutOfMemoryError)?));
- cmd_buffers.push((*cmd_pools[i]).acquire_command_buffer::<command::MultiShot>());
+ cmd_buffers.push(unsafe { (*cmd_pools[i]).allocate_one(hal::command::Level::Primary) });
get_image.push(device.create_semaphore().map_err(|_| CreationError::SyncObjectError)?);
render_complete.push(device.create_semaphore().map_err(|_| CreationError::SyncObjectError)?);
present_complete.push(device.create_fence(true).map_err(|_| CreationError::SyncObjectError)?);
@@ -332,7 +360,8 @@ impl RenderingContext {
pipeline_layout: ManuallyDrop::new(pipeline_layout),
pipeline: ManuallyDrop::new(pipeline),
- map_verts,
+ vert_buffer,
+ index_buffer,
adapter
})
@@ -660,7 +689,7 @@ impl RenderingContext {
}
}
-impl core::ops::Drop for RenderingContext {
+impl<'a> core::ops::Drop for RenderingContext<'a> {
fn drop(&mut self) {
// TODO: Probably missing some destroy stuff
self.device.wait_idle().unwrap();
@@ -687,7 +716,7 @@ impl core::ops::Drop for RenderingContext {
use core::ptr::read;
for cmd_pool in self.cmd_pools.drain(..) {
self.device.destroy_command_pool(
- ManuallyDrop::into_inner(cmd_pool).into_raw(),
+ ManuallyDrop::into_inner(cmd_pool),
);
}
self.device
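With the context now owning `vert_buffer` and `index_buffer`, draw code is expected to fill them through the `Index`/`IndexMut` impls from buffer.rs, which also flag the staging copy as dirty. Below is a hedged sketch of that write path; the `write_tri2` helper and the flat `[f32; 6]` position layout are illustrative assumptions, not part of this commit.

```rust
// Hypothetical helper on RenderingContext: write one 2D triangle's positions
// into the staged vertex buffer. IndexMut sets staged_is_dirty, so a later
// commit/upload knows the device-local copy is stale.
impl<'a> RenderingContext<'a> {
    fn write_tri2(&mut self, points: [f32; 6]) {
        for (i, v) in points.iter().enumerate() {
            self.vert_buffer[i] = *v;
        }
    }
}
```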
diff --git a/stockton-render/src/draw/mod.rs b/stockton-render/src/draw/mod.rs
index 0a4c17a..c506f7a 100644
--- a/stockton-render/src/draw/mod.rs
+++ b/stockton-render/src/draw/mod.rs
@@ -16,8 +16,7 @@
//! Given 3D points and some camera information, renders to the screen.
mod context;
-mod vertexlump;
+mod buffer;
pub use self::context::RenderingContext;
pub use self::context::Tri2;
-pub(crate) use self::vertexlump::VertexLump;
\ No newline at end of file
diff --git a/stockton-render/src/draw/vertexlump.rs b/stockton-render/src/draw/vertexlump.rs
deleted file mode 100644
index ad607ef..0000000
--- a/stockton-render/src/draw/vertexlump.rs
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (C) 2019 Oscar Shrimpton
-
-// This program is free software: you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation, either version 3 of the License, or (at your option)
-// any later version.
-
-// This program is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this program. If not, see <http://www.gnu.org/licenses/>.
-
-use std::marker::PhantomData;
-use std::ops::{Index, IndexMut, Range};
-use std::convert::TryInto;
-use core::mem::{ManuallyDrop, size_of};
-use hal::memory::{Pod, Properties, Requirements};
-use hal::buffer::Usage;
-use hal::adapter::MemoryTypeId;
-use hal::{VertexCount, InstanceCount, Adapter, Device, PhysicalDevice, mapping};
-use hal::Backend;
-use crate::error::CreationError;
-use super::RenderingContext;
-
-pub(crate) struct VertexLump<T: Into<X>, X: Pod> {
- pub (crate) buffer: ManuallyDrop<<back::Backend as hal::Backend>::Buffer>,
- memory: ManuallyDrop<<back::Backend as hal::Backend>::Memory>,
- requirements: Requirements,
-
- unit_size_bytes: u64,
- unit_size_verts: u64,
- batch_size: u64,
-
- num_batches: usize,
-
-
- /// An instance is active if it has been assigned to
- pub active_instances: Range<InstanceCount>,
- pub active_verts: Range<VertexCount>,
-
- active: bool,
-
- _t: PhantomData<T>,
- _x: PhantomData<X>
-}
-
-const BATCH_SIZE: u64 = 3;
-
-impl<T: Into<X>, X: Pod> VertexLump<T, X> {
- pub fn new(device: &mut <back::Backend as hal::Backend>::Device, adapter: &Adapter<back::Backend>) -> Result<VertexLump<T, X>, CreationError> {
- let unit_size_bytes = size_of::<X>() as u64;
- let unit_size_verts = unit_size_bytes / size_of::<f32>() as u64;
-
- let mut buffer = unsafe { device
- .create_buffer(BATCH_SIZE * unit_size_bytes, Usage::VERTEX) }
-
- .map_err(|e| CreationError::BufferError (e))?;
-
- let requirements = unsafe { device.get_buffer_requirements(&buffer) };
- let memory_type_id = adapter.physical_device
- .memory_properties().memory_types
- .iter().enumerate()
- .find(|&(id, memory_type)| {
- requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
- })
- .map(|(id, _)| MemoryTypeId(id))
- .ok_or(CreationError::BufferNoMemory)?;
-
- let memory = unsafe {device
- .allocate_memory(memory_type_id, requirements.size) }
- .map_err(|_| CreationError::OutOfMemoryError)?;
-
- unsafe { device
- .bind_buffer_memory(&memory, 0, &mut buffer) }
- .map_err(|_| CreationError::BufferNoMemory)?;
-
- Ok(VertexLump {
- buffer: ManuallyDrop::new(buffer),
- memory: ManuallyDrop::new(memory),
- requirements,
- active_verts: 0..0,
- active_instances: 0..0,
- num_batches: 1,
- unit_size_bytes,
- unit_size_verts,
- batch_size: BATCH_SIZE, // TODO
- active: true,
- _t: PhantomData,
- _x: PhantomData
- })
- }
-
- pub fn set_active_instances(&mut self, range: Range<InstanceCount>) {
- let count: u64 = (range.end - range.start).into();
- let size_verts: u32 = (count * self.unit_size_verts).try_into().unwrap();
- self.active_verts = range.start * size_verts..range.end * size_verts;
- self.active_instances = range;
- }
-
- pub fn add(&mut self, tri: T, ctx: &mut RenderingContext) -> Result<(), ()> {
-
- // figure out where to put it
- let idx: usize = (self.active_instances.end).try_into().unwrap();
- let batch_size: usize = self.batch_size.try_into().unwrap();
- let max_size: usize = self.num_batches * batch_size;
-
- // make sure correct size
- if idx >= max_size {
- self.num_batches += 1;
-
- debug!("Reallocating Vertex buffer to {} batches ({} instances)", self.num_batches, self.num_batches as u64 * self.batch_size);
- // get new buffer
- let (new_buffer, new_requirements, new_memory) = {
- let mut buffer = ManuallyDrop::new(unsafe { ctx.device
- .create_buffer(self.batch_size * self.unit_size_bytes * self.num_batches as u64, Usage::VERTEX) }
- .map_err(|_| ())?
- );
- let requirements = unsafe { ctx.device.get_buffer_requirements(&buffer) };
-
- let memory_type_id = ctx.adapter.physical_device
- .memory_properties().memory_types
- .iter().enumerate()
- .find(|&(id, memory_type)| {
- requirements.type_mask & (1 << id) != 0 && memory_type.properties.contains(Properties::CPU_VISIBLE)
- })
- .map(|(id, _)| MemoryTypeId(id))
- .ok_or(())?;
-
- let memory = ManuallyDrop::new(unsafe { ctx.device
- .allocate_memory(memory_type_id, requirements.size) }
- .map_err(|_| ())?);
-
- unsafe { ctx.device
- .bind_buffer_memory(&memory, 0, &mut buffer) }
- .map_err(|_| ())?;
-
- (buffer, requirements, memory)
- };
-
- // copy vertices
- unsafe {
- let copy_range = 0..self.requirements.size;
-
- trace!("Copying {:?} from old buffer to new buffer", copy_range);
-
- let reader = ctx.device.acquire_mapping_reader::<u8>(&*(self.memory), copy_range.clone())
- .map_err(|_| ())?;
- let mut writer = ctx.device.acquire_mapping_writer::<u8>(&new_memory, copy_range.clone())
- .map_err(|_| ())?;
-
- let copy_range: Range<usize> = 0..self.requirements.size.try_into().unwrap();
- writer[copy_range.clone()].copy_from_slice(&reader[copy_range.clone()]);
-
- ctx.device.release_mapping_reader(reader);
- ctx.device.release_mapping_writer(writer).map_err(|_| ())?;
- };
-
- // destroy old buffer
- self.deactivate(ctx);
-
- // use new one
- self.buffer = new_buffer;
- self.requirements = new_requirements;
- self.memory = new_memory;
- self.active = true;
-
- }
-
- {
- // acquire writer
- let mut writer = self.writer(ctx)?;
-
- // write to it
- writer[idx] = tri.into();
- }
-
- // activate new triangle
- let new_range = self.active_instances.start..self.active_instances.end + 1;
- self.set_active_instances(new_range);
-
- Ok(())
- }
-
- pub(crate) fn writer<'a>(&'a mut self, ctx: &'a mut RenderingContext) -> Result<VertexWriter<'a, X>, ()> {
- let mapping_writer = unsafe { ctx.device
- .acquire_mapping_writer(&*(self.memory), 0..self.requirements.size)
- .map_err(|_| ())? };
-
- Ok(VertexWriter {
- mapping_writer: ManuallyDrop::new(mapping_writer),
- ctx
- })
- }
-
- pub(crate) fn deactivate(&mut self, ctx: &mut RenderingContext) {
- unsafe { ctx.device.free_memory(ManuallyDrop::take(&mut self.memory)) };
- unsafe { ctx.device.destroy_buffer(ManuallyDrop::take(&mut self.buffer)) };
- self.active = false;
- }
-}
-
-pub struct VertexWriter<'a, X: Pod> {
- mapping_writer: ManuallyDrop<mapping::Writer<'a, back::Backend, X>>,
- ctx: &'a mut RenderingContext
-}
-
-impl<'a, X: Pod> Drop for VertexWriter<'a, X> {
- fn drop(&mut self) {
- unsafe {
- self.ctx.device.release_mapping_writer(ManuallyDrop::take(&mut self.mapping_writer))
- }.unwrap();
- }
-}
-
-impl<'a, X: Pod> Index<usize> for VertexWriter<'a, X> {
- type Output = X;
-
- fn index(&self, index: usize) -> &Self::Output {
- &self.mapping_writer[index]
- }
-}
-
-impl<'a, X: Pod> IndexMut<usize> for VertexWriter<'a, X> {
- fn index_mut(&mut self, index: usize) -> &mut Self::Output {
- &mut self.mapping_writer[index]
- }
-}
-
-
-impl<T: Into<X>, X: Pod> Drop for VertexLump<T, X> {
- fn drop(&mut self) {
- if self.active {
- panic!("VertexLump dropped without being deactivated");
- }
- }
-}
\ No newline at end of file
diff --git a/stockton-render/src/error.rs b/stockton-render/src/error.rs
index 7b2b3c0..485d1e7 100644
--- a/stockton-render/src/error.rs
+++ b/stockton-render/src/error.rs
@@ -27,7 +27,7 @@ pub enum CreationError {
WindowError,
BadSurface,
- DeviceError (hal::error::DeviceCreationError),
+ DeviceError (hal::device::CreationError),
OutOfMemoryError,
@@ -42,7 +42,7 @@ pub enum CreationError {
BufferNoMemory,
SwapchainError (hal::window::CreationError),
- ImageViewError (hal::image::ViewError)
+ ImageViewError (hal::image::ViewCreationError)
}
/// An error encountered when rendering.
diff --git a/stockton-render/src/lib.rs b/stockton-render/src/lib.rs
index 31cf57e..e51a582 100644
--- a/stockton-render/src/lib.rs
+++ b/stockton-render/src/lib.rs
@@ -60,7 +60,7 @@ use std::sync::{Arc, RwLock};
pub struct Renderer<'a> {
world: Arc<RwLock<World<'a>>>,
- pub context: RenderingContext
+ pub context: RenderingContext<'a>
}