/*
* Copyright (C) Oscar Shrimpton 2020
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
 * with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! Deals with all the Vulkan/HAL details.
//! In the end, this takes in a depth-sorted list of faces and a map file and renders them.
//! You'll need something else to actually find/sort the faces though.
use std::{convert::TryInto, mem::ManuallyDrop, ops::Deref};
use arrayvec::ArrayVec;
use hal::{pool::CommandPoolCreateFlags, prelude::*};
use log::debug;
use na::Mat4;
use winit::window::Window;
use stockton_levels::prelude::*;
use stockton_levels::traits::faces::FaceType;
use stockton_types::{Vector2, Vector3};
use super::{
buffer::ModifiableBuffer,
draw_buffers::{DrawBuffers, INITIAL_INDEX_SIZE, INITIAL_VERT_SIZE},
pipeline::CompletePipeline,
target::{SwapchainProperties, TargetChain},
texture::TextureStore,
};
use crate::{error, types::*};
/// Represents a point of a triangle, including UV and texture information.
///
/// Tuple fields: `.0` = position, `.1` = texture index, `.2` = UV coordinates
/// (see the construction site in `draw_vertices`).
/// NOTE(review): this layout presumably mirrors the vertex attribute
/// description in the pipeline — confirm before reordering fields.
#[derive(Debug, Clone, Copy)]
pub struct UVPoint(pub Vector3, pub i32, pub Vector2);
/// Contains all the hal related stuff.
/// In the end, this takes in a depth-sorted list of faces and a map file and renders them.
///
/// Most fields are wrapped in `ManuallyDrop` because destruction order
/// matters: the `Drop` impl below reads them out with `ptr::read` and
/// destroys device children before the command pool, surface and device.
// TODO: Settings for clear colour, buffer sizes, etc
pub struct RenderingContext<'a> {
    // Parents for most of these things
    /// Vulkan Instance
    instance: ManuallyDrop,

    /// Device we're using
    device: ManuallyDrop,

    /// Adapter we're using
    adapter: Adapter,

    // Render destination
    /// Surface to draw to (owned by `instance`; destroyed via it in `drop`)
    surface: ManuallyDrop,

    /// Swapchain and stuff
    pub(crate) target_chain: ManuallyDrop,

    /// Graphics pipeline and associated objects
    pipeline: ManuallyDrop,

    // Command pool and buffers
    /// The command pool used for our buffers
    cmd_pool: ManuallyDrop,

    /// The queue group our buffers belong to
    queue_group: QueueGroup,

    /// Texture store
    texture_store: ManuallyDrop,

    /// Buffers used for drawing (vertex + index staging/GPU buffers)
    draw_buffers: ManuallyDrop>,

    /// View projection matrix, pushed to the pipeline each frame
    pub(crate) vp_matrix: Mat4,
}
impl<'a> RenderingContext<'a> {
    /// Create a new RenderingContext for the given window.
    ///
    /// Initialisation order: instance + surface → adapter → device/queue
    /// group → swapchain properties → command pool → draw buffers →
    /// texture store (uploads from `file`) → pipeline → target chain.
    ///
    /// # Panics
    /// Panics (via `unwrap`) if no queue family supports both the surface
    /// and graphics, or if the physical device cannot be opened.
    pub fn new(window: &Window, file: &T) -> Result {
        // Create surface
        // SAFETY: raw backend instance/surface creation; both failures are
        // mapped to WindowError rather than propagated as backend errors.
        let (instance, mut surface, mut adapters) = unsafe {
            use hal::Instance;

            let instance = back::Instance::create("stockton", 1)
                .map_err(|_| error::CreationError::WindowError)?;
            let surface = instance
                .create_surface(window)
                .map_err(|_| error::CreationError::WindowError)?;
            let adapters = instance.enumerate_adapters();

            (instance, surface, adapters)
        };

        // TODO: Properly figure out which adapter to use
        // Currently just takes the first adapter the backend reports.
        let mut adapter = adapters.remove(0);

        // Device & Queue group
        let (mut device, mut queue_group) = {
            // Need a family that can both present to our surface and do graphics.
            let family = adapter
                .queue_families
                .iter()
                .find(|family| {
                    surface.supports_queue_family(family) && family.queue_type().supports_graphics()
                })
                .unwrap();

            // SAFETY: `family` comes from this adapter's own queue_families,
            // so opening it with one queue at priority 1.0 is valid.
            let mut gpu = unsafe {
                adapter
                    .physical_device
                    .open(&[(family, &[1.0])], hal::Features::empty())
                    .unwrap()
            };

            (gpu.device, gpu.queue_groups.pop().unwrap())
        };

        // Figure out what our swapchain will look like
        let swapchain_properties = SwapchainProperties::find_best(&adapter, &surface)
            .map_err(|_| error::CreationError::BadSurface)?;

        debug!(
            "Detected following swapchain properties: {:?}",
            swapchain_properties
        );

        // Command pool
        // RESET_INDIVIDUAL lets each command buffer be re-recorded on its own.
        let mut cmd_pool = unsafe {
            device.create_command_pool(queue_group.family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
        }
        .map_err(|_| error::CreationError::OutOfMemoryError)?;

        // Vertex and index buffers
        let draw_buffers = DrawBuffers::new(&mut device, &adapter)?;

        // Texture store (uploads the map file's textures via the first queue)
        let texture_store = TextureStore::new(
            &mut device,
            &mut adapter,
            &mut queue_group.queues[0],
            &mut cmd_pool,
            file,
        )?;

        // The pipeline layout gets the texture store's descriptor set layout.
        let mut descriptor_set_layouts: ArrayVec<[_; 2]> = ArrayVec::new();
        descriptor_set_layouts.push(texture_store.descriptor_set_layout.deref());

        // Graphics pipeline
        let pipeline = CompletePipeline::new(
            &mut device,
            swapchain_properties.extent,
            &swapchain_properties,
            descriptor_set_layouts,
        )?;

        // Swapchain and associated resources (no old swapchain to recycle yet)
        let target_chain = TargetChain::new(
            &mut device,
            &adapter,
            &mut surface,
            &pipeline,
            &mut cmd_pool,
            swapchain_properties,
            None,
        )
        .map_err(error::CreationError::TargetChainCreationError)?;

        Ok(RenderingContext {
            instance: ManuallyDrop::new(instance),
            surface: ManuallyDrop::new(surface),
            device: ManuallyDrop::new(device),
            adapter,
            queue_group,
            target_chain: ManuallyDrop::new(target_chain),
            cmd_pool: ManuallyDrop::new(cmd_pool),
            pipeline: ManuallyDrop::new(pipeline),
            texture_store: ManuallyDrop::new(texture_store),
            draw_buffers: ManuallyDrop::new(draw_buffers),
            vp_matrix: Mat4::identity(),
        })
    }

    /// Rebuilds the pipeline and target chain after the surface changes
    /// (e.g. a window resize), recycling the old swapchain where possible.
    ///
    /// If this function fails the whole context is probably dead
    /// # Safety
    /// The context must not be used while this is being called
    pub unsafe fn handle_surface_change(&mut self) -> Result<(), error::CreationError> {
        // Nothing may be in flight while we tear down and rebuild.
        self.device.wait_idle().unwrap();

        let properties = SwapchainProperties::find_best(&self.adapter, &self.surface)
            .map_err(|_| error::CreationError::BadSurface)?;

        use core::ptr::read;

        // Graphics pipeline
        // TODO: Recycle
        // SAFETY: the old pipeline is read out of its ManuallyDrop exactly
        // once and immediately deactivated; the field is overwritten below
        // before anyone can observe the moved-from state.
        // NOTE(review): if CompletePipeline::new fails here we return with
        // `self.pipeline` already deactivated but not replaced — consistent
        // with the "context is probably dead" caveat above, but worth noting.
        ManuallyDrop::into_inner(read(&self.pipeline)).deactivate(&mut self.device);
        self.pipeline = ManuallyDrop::new({
            let mut descriptor_set_layouts: ArrayVec<[_; 2]> = ArrayVec::new();
            descriptor_set_layouts.push(self.texture_store.descriptor_set_layout.deref());

            CompletePipeline::new(
                &mut self.device,
                properties.extent,
                &properties,
                descriptor_set_layouts,
            )?
        });

        // Tear down the old target chain but keep the swapchain handle so the
        // new one can be created from it (driver-side recycling).
        let old_swapchain = ManuallyDrop::into_inner(read(&self.target_chain))
            .deactivate_with_recyling(&mut self.device, &mut self.cmd_pool);
        self.target_chain = ManuallyDrop::new(
            TargetChain::new(
                &mut self.device,
                &self.adapter,
                &mut self.surface,
                &self.pipeline,
                &mut self.cmd_pool,
                properties,
                Some(old_swapchain),
            )
            .map_err(error::CreationError::TargetChainCreationError)?,
        );

        Ok(())
    }

    /// Draw all vertices in the buffer
    ///
    /// `faces` holds indices into `file`'s faces and is assumed to already be
    /// depth-sorted by the caller. Faces are batched by texture chunk
    /// (`texture_idx / 8`) so each run of same-chunk faces needs only one
    /// descriptor set bind and one indexed draw.
    /// NOTE(review): the divisor 8 presumably matches the number of textures
    /// per TextureStore chunk — confirm and consider a shared constant.
    pub fn draw_vertices>(
        &mut self,
        file: &M,
        faces: &[u32],
    ) -> Result<(), &'static str> {
        // Prepare command buffer
        let cmd_buffer = self.target_chain.prep_next_target(
            &mut self.device,
            &mut self.draw_buffers,
            &self.pipeline,
            &self.vp_matrix,
        )?;

        // Iterate over faces, copying them in and drawing groups that use the same texture chunk all at once.
        // NOTE(review): `file.get_face(0)` assumes the map has at least one face.
        let mut current_chunk = file.get_face(0).texture_idx as usize / 8;
        let mut chunk_start = 0;

        // Write cursors into the vertex and index staging buffers.
        let mut curr_vert_idx: usize = 0;
        let mut curr_idx_idx: usize = 0;

        for face in faces.iter().map(|idx| file.get_face(*idx)) {
            if current_chunk != face.texture_idx as usize / 8 {
                // Last index was last of group, so draw it all.
                let mut descriptor_sets: ArrayVec<[_; 1]> = ArrayVec::new();
                descriptor_sets.push(self.texture_store.get_chunk_descriptor_set(current_chunk));

                // SAFETY: recording into the command buffer returned by
                // prep_next_target above; indices drawn here were written in
                // previous iterations of this loop.
                unsafe {
                    cmd_buffer.bind_graphics_descriptor_sets(
                        &self.pipeline.pipeline_layout,
                        0,
                        descriptor_sets,
                        &[],
                    );

                    // NOTE(review): the range end `(curr_idx_idx as u32 * 3) + 1`
                    // is one past a multiple of 3 — looks like an off-by-one
                    // versus the expected `curr_idx_idx * 3`; confirm against
                    // the index buffer layout (3 indices per triangle).
                    cmd_buffer.draw_indexed(
                        chunk_start as u32 * 3..(curr_idx_idx as u32 * 3) + 1,
                        0,
                        0..1,
                    );
                }

                // Next group of same-chunked faces starts here.
                chunk_start = curr_idx_idx;
                current_chunk = face.texture_idx as usize / 8;
            }

            if face.face_type == FaceType::Polygon || face.face_type == FaceType::Mesh {
                // 2 layers of indirection
                let base = face.vertices_idx.start;

                // Each meshvert triple forms one triangle.
                for idx in face.meshverts_idx.clone().step_by(3) {
                    let start_idx: u16 = curr_vert_idx.try_into().unwrap();

                    for idx2 in idx..idx + 3 {
                        let vert = &file.resolve_meshvert(idx2 as u32, base);
                        // NOTE(review): only the first u/v channel is used —
                        // presumably the diffuse UV set; confirm.
                        let uv = Vector2::new(vert.tex.u[0], vert.tex.v[0]);

                        let uvp = UVPoint(vert.position, face.texture_idx.try_into().unwrap(), uv);
                        self.draw_buffers.vertex_buffer[curr_vert_idx] = uvp;

                        curr_vert_idx += 1;
                    }

                    self.draw_buffers.index_buffer[curr_idx_idx] =
                        (start_idx, start_idx + 1, start_idx + 2);

                    curr_idx_idx += 1;

                    // Buffer-full guard: silently truncates this face's
                    // remaining triangles.
                    // NOTE(review): `println!` here should probably be
                    // `log::warn!` to match the file's logging style.
                    if curr_vert_idx >= INITIAL_VERT_SIZE.try_into().unwrap()
                        || curr_idx_idx >= INITIAL_INDEX_SIZE.try_into().unwrap()
                    {
                        println!("out of vertex buffer space!");
                        break;
                    }
                }
            } else {
                // TODO: Other types of faces
            }

            // Same guard again: the inner `break` only exits the meshverts
            // loop, so re-check here to stop iterating faces entirely.
            if curr_vert_idx >= INITIAL_VERT_SIZE.try_into().unwrap()
                || curr_idx_idx >= INITIAL_INDEX_SIZE.try_into().unwrap()
            {
                println!("out of vertex buffer space!");
                break;
            }
        }

        // Draw the final group of chunks
        let mut descriptor_sets: ArrayVec<[_; 1]> = ArrayVec::new();
        descriptor_sets.push(self.texture_store.get_chunk_descriptor_set(current_chunk));
        // SAFETY: same command buffer as above; flushes the last (possibly
        // only) batch of same-chunk faces.
        unsafe {
            cmd_buffer.bind_graphics_descriptor_sets(
                &self.pipeline.pipeline_layout,
                0,
                descriptor_sets,
                &[],
            );

            // NOTE(review): same suspected off-by-one range end as above.
            cmd_buffer.draw_indexed(
                chunk_start as u32 * 3..(curr_idx_idx as u32 * 3) + 1,
                0,
                0..1,
            );
        }

        // Update our buffers before we actually start drawing
        self.draw_buffers.vertex_buffer.commit(
            &self.device,
            &mut self.queue_group.queues[0],
            &mut self.cmd_pool,
        );

        self.draw_buffers.index_buffer.commit(
            &self.device,
            &mut self.queue_group.queues[0],
            &mut self.cmd_pool,
        );

        // Send commands off to GPU
        self.target_chain
            .finish_and_submit_target(&mut self.queue_group.queues[0])?;

        Ok(())
    }
}
impl<'a> core::ops::Drop for RenderingContext<'a> {
    /// Destroys all GPU resources in reverse dependency order.
    fn drop(&mut self) {
        // The GPU must be idle before anything it may still be using is freed.
        self.device.wait_idle().unwrap();

        unsafe {
            use core::ptr::read;

            // SAFETY: every field below is wrapped in ManuallyDrop and is
            // read out exactly once; `self` is never used after drop().
            // Order matters: device children first, then the command pool,
            // then the surface (destroyed via the instance that owns it),
            // and finally the device itself.
            ManuallyDrop::into_inner(read(&self.draw_buffers)).deactivate(&mut self.device);
            ManuallyDrop::into_inner(read(&self.texture_store)).deactivate(&mut self.device);

            ManuallyDrop::into_inner(read(&self.target_chain))
                .deactivate(&mut self.device, &mut self.cmd_pool);

            self.device
                .destroy_command_pool(ManuallyDrop::into_inner(read(&self.cmd_pool)));
            ManuallyDrop::into_inner(read(&self.pipeline)).deactivate(&mut self.device);

            self.instance
                .destroy_surface(ManuallyDrop::into_inner(read(&self.surface)));

            ManuallyDrop::drop(&mut self.device);
        }
    }
}