use std::{ops::Range, ptr::NonNull};

use {
    crate::{
        allocator::{Allocator, Kind},
        block::Block,
        mapping::{mapped_sub_range, MappedRange},
        memory::*,
        util::*,
    },
    gfx_hal::{device::Device as _, Backend},
};

/// Memory block allocated from `DedicatedAllocator`.
#[derive(Debug)]
pub struct DedicatedBlock<B: Backend> {
    memory: Memory<B>,
    mapping: Option<(NonNull<u8>, Range<u64>)>,
}
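
// Safety note (added, not from the original authors): the raw mapping pointer is
// only handed out through `MappedRange`, which borrows the block, so moving or
// sharing the block itself across threads is presumed sound here.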
unsafe impl<B> Send for DedicatedBlock<B> where B: Backend {}
unsafe impl<B> Sync for DedicatedBlock<B> where B: Backend {}

impl<B> DedicatedBlock<B>
where
    B: Backend,
{
    /// Get the inner memory.
    ///
    /// # Panics
    ///
    /// Panics if the block is currently mapped.
    pub fn unwrap_memory(self) -> Memory<B> {
        assert!(self.mapping.is_none());
        self.memory
    }

    /// Make an unmapped block from the given memory.
    pub fn from_memory(memory: Memory<B>) -> Self {
        DedicatedBlock {
            memory,
            mapping: None,
        }
    }
}

impl<B> Block<B> for DedicatedBlock<B>
where
    B: Backend,
{
    #[inline]
    fn properties(&self) -> gfx_hal::memory::Properties {
        self.memory.properties()
    }

    #[inline]
    fn memory(&self) -> &B::Memory {
        self.memory.raw()
    }

    #[inline]
    fn range(&self) -> Range<u64> {
        0..self.memory.size()
    }

    fn map<'a>(
        &'a mut self,
        device: &B::Device,
        range: Range<u64>,
    ) -> Result<MappedRange<'a, B>, gfx_hal::device::MapError> {
        assert!(
            range.start < range.end,
            "Memory mapping region must have valid size"
        );

        if !self.memory.host_visible() {
            // TODO: invalid access error
            return Err(gfx_hal::device::MapError::MappingFailed);
        }

        let requested_range = range.clone();
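        // For non-coherent memory the mapped range must be aligned to the
        // non-coherent atom size: `align_range` rounds the start down and the
        // end up (e.g. with a 64-byte atom, 10..100 becomes 0..128).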
        let mapping_range = if !self.memory.host_coherent() {
            align_range(range, self.memory.non_coherent_atom_size())
        } else {
            range
        };

        unsafe {
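            // Reuse the current mapping if it already covers the aligned range;
            // otherwise unmap and map the aligned range fresh.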
            if let Some(ptr) = self
                .mapping
                .clone()
                .and_then(|(ptr, range)| mapped_sub_range(ptr, range, mapping_range.clone()))
            {
                Ok(MappedRange::from_raw(
                    &self.memory,
                    ptr,
                    mapping_range,
                    requested_range,
                ))
            } else {
                self.unmap(device);
                let ptr = device.map_memory(
                    self.memory.raw_mut(),
                    gfx_hal::memory::Segment {
                        offset: mapping_range.start,
                        size: Some(mapping_range.end - mapping_range.start),
                    },
                )?;
                let ptr = NonNull::new(ptr).expect("Memory mapping shouldn't return nullptr");
                let mapping =
                    MappedRange::from_raw(&self.memory, ptr, mapping_range, requested_range);
                self.mapping = Some((mapping.ptr(), mapping.range()));
                Ok(mapping)
            }
        }
    }

    fn unmap(&mut self, device: &B::Device) {
        if self.mapping.take().is_some() {
            unsafe {
                // trace!("Unmap memory: {:#?}", self.memory);
                device.unmap_memory(self.memory.raw_mut());
            }
        }
    }
}

/// Dedicated memory allocator that creates a separate memory object per requested allocation.
///
/// This allocator is best suited for huge allocations,
/// e.g. 32 MiB and up when the GPU has 4-8 GiB of memory in total.
///
/// `Heaps` uses this allocator when none of the sub-allocators bound to the memory type
/// can handle the requested size.
/// TODO: Check if the resource prefers dedicated memory.
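///
/// A minimal usage sketch (marked `ignore` since it needs a concrete `gfx_hal`
/// backend; `device`, `memory_type`, `props` and `atom` are assumed to come from
/// your setup code):
///
/// ```ignore
/// let mut allocator = DedicatedAllocator::new(memory_type, props, atom);
///
/// // Allocate one dedicated memory object; returns the block and the size actually used.
/// let (mut block, allocated) = allocator.alloc(&device, 32 * 1024 * 1024, 1)?;
///
/// // If `props` is host-visible, the block can be mapped for CPU access.
/// let mapped = block.map(&device, 0..1024)?;
///
/// // Hand the block back, which also frees the underlying memory object.
/// allocator.free(&device, block);
/// ```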
#[derive(Debug)]
pub struct DedicatedAllocator {
    memory_type: gfx_hal::MemoryTypeId,
    memory_properties: gfx_hal::memory::Properties,
    non_coherent_atom_size: u64,
    used: u64,
}

impl DedicatedAllocator {
    /// Get the memory properties required by the allocator.
    pub fn properties_required() -> gfx_hal::memory::Properties {
        gfx_hal::memory::Properties::empty()
    }

    /// Create a new `DedicatedAllocator`
    /// for `memory_type` with the specified `memory_properties`.
    pub fn new(
        memory_type: gfx_hal::MemoryTypeId,
        memory_properties: gfx_hal::memory::Properties,
        non_coherent_atom_size: u64,
    ) -> Self {
        DedicatedAllocator {
            memory_type,
            memory_properties,
            non_coherent_atom_size,
            used: 0,
        }
    }
}

impl<B> Allocator<B> for DedicatedAllocator
where
    B: Backend,
{
    type Block = DedicatedBlock<B>;

    fn kind() -> Kind {
        Kind::Dedicated
    }

    #[inline]
    fn alloc(
        &mut self,
        device: &B::Device,
        size: u64,
        _align: u64,
    ) -> Result<(DedicatedBlock<B>, u64), gfx_hal::device::AllocationError> {
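        // Host-visible non-coherent memory can only be flushed/invalidated in whole
        // non-coherent atoms, so round the allocation size up to the atom size.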
        let size = if is_non_coherent_visible(self.memory_properties) {
            align_size(size, self.non_coherent_atom_size)
        } else {
            size
        };

        let memory = unsafe {
            Memory::from_raw(
                device.allocate_memory(self.memory_type, size)?,
                size,
                self.memory_properties,
                self.non_coherent_atom_size,
            )
        };
        self.used += size;

        Ok((DedicatedBlock::from_memory(memory), size))
    }

    #[inline]
    fn free(&mut self, device: &B::Device, mut block: DedicatedBlock<B>) -> u64 {
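        // Make sure the block is unmapped before the memory object is handed back.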
        block.unmap(device);
        let size = block.memory.size();
        self.used -= size;
        unsafe {
            device.free_memory(block.memory.into_raw());
        }
        size
    }
}

impl Drop for DedicatedAllocator {
    fn drop(&mut self) {
        if self.used > 0 {
            log::error!("Not all allocations from DedicatedAllocator were freed");
        }
    }
}