use hal::{buffer, format, image, memory, pass, pso};
use range_alloc::RangeAllocator;

use parking_lot::RwLock;
use winapi::{
    shared::{dxgiformat::DXGI_FORMAT, minwindef::UINT},
    um::d3d12,
};

use std::{collections::BTreeMap, fmt, ops::Range, slice, sync::Arc};

use crate::{
    descriptors_cpu::{Handle, MultiCopyAccumulator},
    root_constants::RootConstant,
    Backend, DescriptorIndex, MAX_VERTEX_BUFFERS,
};

// A `ShaderModule` is either a map of precompiled entry-point blobs (when
// the source was HLSL, or the SPIR-V contains neither specialization
// constants nor push constants) or the raw SPIR-V words, which must be
// adjusted at pipeline creation time.
#[derive(Debug, Hash)]
pub enum ShaderModule {
    Compiled(BTreeMap<String, native::Blob>),
    Spirv(Vec<u32>),
}
unsafe impl Send for ShaderModule {}
unsafe impl Sync for ShaderModule {}

#[derive(Clone, Debug, Hash)]
pub struct BarrierDesc {
    pub(crate) attachment_id: pass::AttachmentId,
    pub(crate) states: Range<d3d12::D3D12_RESOURCE_STATES>,
    pub(crate) flags: d3d12::D3D12_RESOURCE_BARRIER_FLAGS,
}

impl BarrierDesc {
    pub(crate) fn new(
        attachment_id: pass::AttachmentId,
        states: Range<d3d12::D3D12_RESOURCE_STATES>,
    ) -> Self {
        BarrierDesc {
            attachment_id,
            states,
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE,
        }
    }

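    /// Split the barrier into a begin/end pair so other work can overlap
    /// with the transition: the `start` of the returned range carries
    /// `BEGIN_ONLY`, the `end` carries `END_ONLY`.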
    pub(crate) fn split(self) -> Range<Self> {
        BarrierDesc {
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_BEGIN_ONLY,
            ..self.clone()
        }..BarrierDesc {
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_END_ONLY,
            ..self
        }
    }
}

#[derive(Clone, Debug, Hash)]
pub struct SubpassDesc {
    pub(crate) color_attachments: Vec<pass::AttachmentRef>,
    pub(crate) depth_stencil_attachment: Option<pass::AttachmentRef>,
    pub(crate) input_attachments: Vec<pass::AttachmentRef>,
    pub(crate) resolve_attachments: Vec<pass::AttachmentRef>,
    pub(crate) pre_barriers: Vec<BarrierDesc>,
    pub(crate) post_barriers: Vec<BarrierDesc>,
}

impl SubpassDesc {
    /// Check if an attachment is used by this sub-pass.
    //Note: preserved attachments are not considered used.
    pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool {
        self.color_attachments
            .iter()
            .chain(self.depth_stencil_attachment.iter())
            .chain(self.input_attachments.iter())
            .chain(self.resolve_attachments.iter())
            .any(|&(id, _)| id == at_id)
    }
}

#[derive(Clone, Debug, Hash)]
pub struct RenderPass {
    pub(crate) attachments: Vec<pass::Attachment>,
    pub(crate) subpasses: Vec<SubpassDesc>,
    pub(crate) post_barriers: Vec<BarrierDesc>,
    pub(crate) raw_name: Vec<u16>,
}

// Indirection layer: attribute -> remap -> binding.
//
// Required because Vulkan allows attribute offsets larger than the binding
// stride. The stride specified at pipeline creation is stored here since it
// is required when binding vertex buffers.
#[derive(Copy, Clone, Debug)]
pub struct VertexBinding {
    // Maps into the bindings specified on pipeline creation.
    pub mapped_binding: usize,
    pub stride: UINT,
    // Additional offset used to rebase the attribute offsets.
    pub offset: u32,
}
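// Illustrative example (hypothetical numbers): an attribute at offset 260
// in a binding with stride 256 can be remapped to a `VertexBinding` with
// `offset: 256`, leaving an in-stride attribute offset of 4.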

#[derive(Debug)]
pub struct GraphicsPipeline {
    pub(crate) raw: native::PipelineState,
    pub(crate) shared: Arc<PipelineShared>,
    pub(crate) topology: d3d12::D3D12_PRIMITIVE_TOPOLOGY,
    pub(crate) vertex_bindings: [Option<VertexBinding>; MAX_VERTEX_BUFFERS],
    pub(crate) baked_states: pso::BakedStates,
}
unsafe impl Send for GraphicsPipeline {}
unsafe impl Sync for GraphicsPipeline {}

#[derive(Debug)]
pub struct ComputePipeline {
    pub(crate) raw: native::PipelineState,
    pub(crate) shared: Arc<PipelineShared>,
}

unsafe impl Send for ComputePipeline {}
unsafe impl Sync for ComputePipeline {}

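// D3D12 exposes two shader-visible descriptor heap types, CBV/SRV/UAV and
// SAMPLER, so a descriptor set can occupy up to two root tables, one per
// heap type. These flags record which tables a set actually uses.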
bitflags! {
    pub struct SetTableTypes: u8 {
        const SRV_CBV_UAV = 0x1;
        const SAMPLERS = 0x2;
    }
}

pub const SRV_CBV_UAV: SetTableTypes = SetTableTypes::SRV_CBV_UAV;
pub const SAMPLERS: SetTableTypes = SetTableTypes::SAMPLERS;

pub type RootSignatureOffset = usize;

#[derive(Debug, Hash)]
pub struct RootTable {
    pub ty: SetTableTypes,
    pub offset: RootSignatureOffset,
}

#[derive(Debug)]
pub struct RootElement {
    pub table: RootTable,
    pub mutable_bindings: auxil::FastHashSet<pso::DescriptorBinding>,
}

#[derive(Debug)]
pub struct PipelineShared {
    pub(crate) signature: native::RootSignature,
    /// Disjoint, sorted vector of root constant ranges.
    pub(crate) constants: Vec<RootConstant>,
    /// A root offset per parameter.
    pub(crate) parameter_offsets: Vec<u32>,
    /// Total number of root slots occupied by the pipeline.
    pub(crate) total_slots: u32,
}

unsafe impl Send for PipelineShared {}
unsafe impl Sync for PipelineShared {}

#[derive(Debug)]
pub struct PipelineLayout {
    pub(crate) shared: Arc<PipelineShared>,
    // For each associated descriptor set layout, stores which tables were
    // created in the root signature. Required for binding descriptor sets.
    pub(crate) elements: Vec<RootElement>,
}

#[derive(Debug, Clone)]
pub struct Framebuffer {
    /// Number of layers in the render area. Required for subpass resolves.
    pub(crate) layers: image::Layer,
}

#[derive(Clone, Debug)]
pub struct BufferUnbound {
    pub(crate) requirements: memory::Requirements,
    pub(crate) usage: buffer::Usage,
    pub(crate) name: Option<Vec<u16>>,
}

pub struct BufferBound {
    pub(crate) resource: native::Resource,
    pub(crate) requirements: memory::Requirements,
    pub(crate) clear_uav: Option<Handle>,
}

impl fmt::Debug for BufferBound {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("BufferBound")
    }
}

unsafe impl Send for BufferBound {}
unsafe impl Sync for BufferBound {}

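// Buffers follow gfx-hal's two-step creation: `create_buffer` yields an
// `Unbound` buffer carrying only memory requirements, and
// `bind_buffer_memory` turns it into a `Bound` one backed by an actual
// `ID3D12Resource`. `Image` below follows the same pattern.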
pub enum Buffer {
    Unbound(BufferUnbound),
    Bound(BufferBound),
}

impl fmt::Debug for Buffer {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Buffer")
    }
}

impl Buffer {
    pub(crate) fn expect_unbound(&self) -> &BufferUnbound {
        match *self {
            Buffer::Unbound(ref unbound) => unbound,
            Buffer::Bound(_) => panic!("Expected unbound buffer"),
        }
    }

    pub(crate) fn expect_bound(&self) -> &BufferBound {
        match *self {
            Buffer::Unbound(_) => panic!("Expected bound buffer"),
            Buffer::Bound(ref bound) => bound,
        }
    }
}

#[derive(Copy, Clone)]
pub struct BufferView {
    // Descriptor handle for uniform texel buffers.
    pub(crate) handle_srv: Option<Handle>,
    // Descriptor handle for storage texel buffers.
    pub(crate) handle_uav: Option<Handle>,
}

impl fmt::Debug for BufferView {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("BufferView")
    }
}

unsafe impl Send for BufferView {}
unsafe impl Sync for BufferView {}

#[derive(Clone)]
pub enum Place {
    Heap { raw: native::Heap, offset: u64 },
    Swapchain {},
}

#[derive(Clone)]
pub struct ImageBound {
    pub(crate) resource: native::Resource,
    pub(crate) place: Place,
    pub(crate) surface_type: format::SurfaceType,
    pub(crate) kind: image::Kind,
    pub(crate) mip_levels: image::Level,
    pub(crate) default_view_format: Option<DXGI_FORMAT>,
    pub(crate) view_caps: image::ViewCapabilities,
    pub(crate) descriptor: d3d12::D3D12_RESOURCE_DESC,
    pub(crate) clear_cv: Vec<Handle>,
    pub(crate) clear_dv: Vec<Handle>,
    pub(crate) clear_sv: Vec<Handle>,
    pub(crate) requirements: memory::Requirements,
}

impl fmt::Debug for ImageBound {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("ImageBound")
    }
}

unsafe impl Send for ImageBound {}
unsafe impl Sync for ImageBound {}

impl ImageBound {
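    /// Compute the linear subresource index following the D3D12 convention
    /// (cf. `D3D12CalcSubresource`): mip level is the innermost dimension,
    /// then array layer, then plane. For example, with `MipLevels = 3` and
    /// 2 layers, (mip 1, layer 1, plane 0) maps to 1 + 1 * 3 = 4.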
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        mip_level
            + (layer * self.descriptor.MipLevels as UINT)
            + (plane * self.descriptor.MipLevels as UINT * self.kind.num_layers() as UINT)
    }
}

#[derive(Clone)]
pub struct ImageUnbound {
    pub(crate) desc: d3d12::D3D12_RESOURCE_DESC,
    pub(crate) view_format: Option<DXGI_FORMAT>,
    pub(crate) dsv_format: Option<DXGI_FORMAT>,
    pub(crate) requirements: memory::Requirements,
    pub(crate) format: format::Format,
    pub(crate) kind: image::Kind,
    pub(crate) mip_levels: image::Level,
    pub(crate) usage: image::Usage,
    pub(crate) tiling: image::Tiling,
    pub(crate) view_caps: image::ViewCapabilities,
    //TODO: use hal::format::FormatDesc
    pub(crate) bytes_per_block: u8,
    // Dimension of a texel block (compressed formats).
    pub(crate) block_dim: (u8, u8),
    pub(crate) name: Option<Vec<u16>>,
}

impl fmt::Debug for ImageUnbound {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("ImageUnbound")
    }
}

impl ImageUnbound {
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        mip_level
            + (layer * self.desc.MipLevels as UINT)
            + (plane * self.desc.MipLevels as UINT * self.kind.num_layers() as UINT)
    }
}

#[derive(Clone)]
pub enum Image {
    Unbound(ImageUnbound),
    Bound(ImageBound),
}

impl fmt::Debug for Image {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Image")
    }
}

impl Image {
    pub(crate) fn expect_unbound(&self) -> &ImageUnbound {
        match *self {
            Image::Unbound(ref unbound) => unbound,
            Image::Bound(_) => panic!("Expected unbound image"),
        }
    }

    pub(crate) fn expect_bound(&self) -> &ImageBound {
        match *self {
            Image::Unbound(_) => panic!("Expected bound image"),
            Image::Bound(ref bound) => bound,
        }
    }

    pub fn get_desc(&self) -> &d3d12::D3D12_RESOURCE_DESC {
        match self {
            Image::Bound(i) => &i.descriptor,
            Image::Unbound(i) => &i.desc,
        }
    }

    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        match self {
            Image::Bound(i) => i.calc_subresource(mip_level, layer, plane),
            Image::Unbound(i) => i.calc_subresource(mip_level, layer, plane),
        }
    }
}

#[derive(Copy, Clone)]
pub enum RenderTargetHandle {
    None,
    Swapchain(native::CpuDescriptor),
    Pool(Handle),
}

impl RenderTargetHandle {
    pub fn raw(&self) -> Option<native::CpuDescriptor> {
        match *self {
            RenderTargetHandle::None => None,
            RenderTargetHandle::Swapchain(rtv) => Some(rtv),
            RenderTargetHandle::Pool(ref handle) => Some(handle.raw),
        }
    }
}

#[derive(Copy, Clone)]
pub struct ImageView {
    pub(crate) resource: native::Resource, // weak-ptr owned by image.
    pub(crate) handle_srv: Option<Handle>,
    pub(crate) handle_rtv: RenderTargetHandle,
    pub(crate) handle_dsv: Option<Handle>,
    pub(crate) handle_uav: Option<Handle>,
    // Required for attachment resolves.
    pub(crate) dxgi_format: DXGI_FORMAT,
    pub(crate) num_levels: image::Level,
    pub(crate) mip_levels: (image::Level, image::Level),
    pub(crate) layers: (image::Layer, image::Layer),
    pub(crate) kind: image::Kind,
}

impl fmt::Debug for ImageView {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("ImageView")
    }
}

unsafe impl Send for ImageView {}
unsafe impl Sync for ImageView {}

impl ImageView {
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT {
        mip_level + (layer * self.num_levels as UINT)
    }

    pub fn is_swapchain(&self) -> bool {
        match self.handle_rtv {
            RenderTargetHandle::Swapchain(_) => true,
            _ => false,
        }
    }
}

pub struct Sampler {
    pub(crate) handle: Handle,
}

impl fmt::Debug for Sampler {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("Sampler")
    }
}

#[derive(Debug)]
pub struct DescriptorSetLayout {
    pub(crate) bindings: Vec<pso::DescriptorSetLayoutBinding>,
}

#[derive(Debug)]
pub struct Fence {
    pub(crate) raw: native::Fence,
}
unsafe impl Send for Fence {}
unsafe impl Sync for Fence {}

#[derive(Debug)]
pub struct Semaphore {
    pub(crate) raw: native::Fence,
}

unsafe impl Send for Semaphore {}
unsafe impl Sync for Semaphore {}

#[derive(Debug)]
pub struct Memory {
    pub(crate) heap: native::Heap,
    pub(crate) type_id: usize,
    pub(crate) size: u64,
    // Buffer containing the whole memory for mapping (only for host visible heaps)
    pub(crate) resource: Option<native::Resource>,
}

unsafe impl Send for Memory {}
unsafe impl Sync for Memory {}

bitflags! {
    /// A set of D3D12 descriptor types that need to be associated
    /// with a single gfx-hal `DescriptorType`.
    #[derive(Default)]
    pub struct DescriptorContent: u8 {
        const CBV = 0x1;
        const SRV = 0x2;
        const UAV = 0x4;
        const SAMPLER = 0x8;

        /// Indicates if the descriptor is a dynamic uniform/storage buffer.
        /// Important as dynamic buffers are implemented as root descriptors.
        const DYNAMIC = 0x10;

        const VIEW = DescriptorContent::CBV.bits | DescriptorContent::SRV.bits | DescriptorContent::UAV.bits;
    }
}

impl DescriptorContent {
    pub fn is_dynamic(&self) -> bool {
        self.contains(DescriptorContent::DYNAMIC)
    }
}

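// For example, a read/write storage image maps to `SRV | UAV`: such a
// binding needs both descriptor kinds, and `DescriptorPool::allocate_one`
// below reserves two view handles per array element for it.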
impl From<pso::DescriptorType> for DescriptorContent {
    fn from(ty: pso::DescriptorType) -> Self {
        use hal::pso::{
            BufferDescriptorFormat as Bdf, BufferDescriptorType as Bdt, DescriptorType as Dt,
            ImageDescriptorType as Idt,
        };

        use DescriptorContent as Dc;

        match ty {
            Dt::Sampler => Dc::SAMPLER,
            Dt::Image { ty } => match ty {
                Idt::Storage { read_only: true } => Dc::SRV,
                Idt::Storage { read_only: false } => Dc::SRV | Dc::UAV,
                Idt::Sampled { with_sampler } => match with_sampler {
                    true => Dc::SRV | Dc::SAMPLER,
                    false => Dc::SRV,
                },
            },
            Dt::Buffer { ty, format } => match ty {
                Bdt::Storage { read_only: true } => match format {
                    Bdf::Structured {
                        dynamic_offset: true,
                    } => Dc::SRV | Dc::DYNAMIC,
                    Bdf::Structured {
                        dynamic_offset: false,
                    }
                    | Bdf::Texel => Dc::SRV,
                },
                Bdt::Storage { read_only: false } => match format {
                    Bdf::Structured {
                        dynamic_offset: true,
                    } => Dc::SRV | Dc::UAV | Dc::DYNAMIC,
                    Bdf::Structured {
                        dynamic_offset: false,
                    }
                    | Bdf::Texel => Dc::SRV | Dc::UAV,
                },
                Bdt::Uniform => match format {
                    Bdf::Structured {
                        dynamic_offset: true,
                    } => Dc::CBV | Dc::DYNAMIC,
                    Bdf::Structured {
                        dynamic_offset: false,
                    } => Dc::CBV,
                    Bdf::Texel => Dc::SRV,
                },
            },
            Dt::InputAttachment => Dc::SRV,
        }
    }
}

#[derive(Debug)]
pub struct DescriptorRange {
    pub(crate) handle: DualHandle,
    pub(crate) ty: pso::DescriptorType,
    pub(crate) handle_size: u64,
}

impl DescriptorRange {
    pub(crate) fn at(&self, index: DescriptorIndex) -> native::CpuDescriptor {
        assert!(index < self.handle.size);
        let ptr = self.handle.cpu.ptr + (self.handle_size * index) as usize;
        native::CpuDescriptor { ptr }
    }
}

#[derive(Copy, Clone, Debug)]
pub(crate) struct DynamicDescriptor {
    pub content: DescriptorContent,
    pub gpu_buffer_location: u64,
}

#[derive(Debug, Default)]
pub struct DescriptorBindingInfo {
    pub(crate) count: u64,
    pub(crate) view_range: Option<DescriptorRange>,
    pub(crate) dynamic_descriptors: Vec<DynamicDescriptor>,
    pub(crate) content: DescriptorContent,
}

#[derive(Default)]
pub struct DescriptorOrigins {
    // For each index on the heap, this array stores the origin CPU handle.
    origins: Vec<native::CpuDescriptor>,
}

impl DescriptorOrigins {
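    /// Linearly search the recorded origins for a run matching `other`,
    /// returning its starting index on the heap if found. This is how
    /// identical sampler sets end up sharing space on the shader-visible
    /// heap (see `DescriptorSet::update_samplers`).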
    fn find(&self, other: &[native::CpuDescriptor]) -> Option<DescriptorIndex> {
        //TODO: need a smarter algorithm here!
        for i in other.len()..=self.origins.len() {
            let base = i - other.len();
            //TODO: use slice comparison when `CpuDescriptor` implements `PartialEq`.
            if unsafe {
                slice::from_raw_parts(&self.origins[base].ptr, other.len())
                    == slice::from_raw_parts(&other[0].ptr, other.len())
            } {
                return Some(base as DescriptorIndex);
            }
        }
        None
    }

    fn grow(&mut self, other: &[native::CpuDescriptor]) -> DescriptorIndex {
        let base = self.origins.len() as DescriptorIndex;
        self.origins.extend_from_slice(other);
        base
    }
}

pub struct DescriptorSet {
    // Required for binding at command buffer recording time.
    pub(crate) heap_srv_cbv_uav: native::DescriptorHeap,
    pub(crate) heap_samplers: native::DescriptorHeap,
    pub(crate) sampler_origins: Box<[native::CpuDescriptor]>,
    pub(crate) binding_infos: Vec<DescriptorBindingInfo>,
    pub(crate) first_gpu_sampler: Option<native::GpuDescriptor>,
    pub(crate) first_gpu_view: Option<native::GpuDescriptor>,
    pub(crate) raw_name: Vec<u16>,
}

impl fmt::Debug for DescriptorSet {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("DescriptorSet")
    }
}

// TODO: is this really safe?
unsafe impl Send for DescriptorSet {}
unsafe impl Sync for DescriptorSet {}

impl DescriptorSet {
    pub fn srv_cbv_uav_gpu_start(&self) -> native::GpuDescriptor {
        self.heap_srv_cbv_uav.start_gpu_descriptor()
    }

    pub fn sampler_gpu_start(&self) -> native::GpuDescriptor {
        self.heap_samplers.start_gpu_descriptor()
    }

    pub fn sampler_offset(&self, binding: u32, last_offset: usize) -> usize {
        let mut offset = 0;
        for bi in &self.binding_infos[..binding as usize] {
            if bi.content.contains(DescriptorContent::SAMPLER) {
                offset += bi.count as usize;
            }
        }
        if self.binding_infos[binding as usize]
            .content
            .contains(DescriptorContent::SAMPLER)
        {
            offset += last_offset;
        }
        offset
    }

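    /// Make this set's samplers available on the shader-visible heap:
    /// either reuse an existing run whose origins match, or, once every
    /// sampler slot has been written, append the origins and queue the
    /// descriptor copies through `accum`.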
    pub fn update_samplers(
        &mut self,
        heap: &DescriptorHeap,
        origins: &RwLock<DescriptorOrigins>,
        accum: &mut MultiCopyAccumulator,
    ) {
        let start_index = if let Some(index) = {
            // An explicit variable limits the lifetime of the read borrow.
            let borrow = origins.read();
            borrow.find(&self.sampler_origins)
        } {
            Some(index)
        } else if self.sampler_origins.iter().any(|desc| desc.ptr == 0) {
            // The set is incomplete, don't try to build it.
            None
        } else {
            let base = origins.write().grow(&self.sampler_origins);
            // Copy the descriptors from their origins into the new location.
            accum.dst_samplers.add(
                heap.cpu_descriptor_at(base),
                self.sampler_origins.len() as u32,
            );
            for &origin in self.sampler_origins.iter() {
                accum.src_samplers.add(origin, 1);
            }
            Some(base)
        };

        self.first_gpu_sampler = start_index.map(|index| heap.gpu_descriptor_at(index));
    }
}

#[derive(Copy, Clone)]
pub struct DualHandle {
    pub(crate) cpu: native::CpuDescriptor,
    pub(crate) gpu: native::GpuDescriptor,
    /// How large the block allocated to this handle is.
    pub(crate) size: u64,
}

impl fmt::Debug for DualHandle {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("DualHandle")
    }
}

pub struct DescriptorHeap {
    pub(crate) raw: native::DescriptorHeap,
    pub(crate) handle_size: u64,
    pub(crate) total_handles: u64,
    pub(crate) start: DualHandle,
}

impl fmt::Debug for DescriptorHeap {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("DescriptorHeap")
    }
}

impl DescriptorHeap {
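    // Handles are plain pointer arithmetic from the heap start:
    // `start + handle_size * index`, where `handle_size` is the device's
    // descriptor handle increment size for this heap type.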
    pub(crate) fn at(&self, index: DescriptorIndex, size: u64) -> DualHandle {
        assert!(index < self.total_handles);
        DualHandle {
            cpu: self.cpu_descriptor_at(index),
            gpu: self.gpu_descriptor_at(index),
            size,
        }
    }

    pub(crate) fn cpu_descriptor_at(&self, index: u64) -> native::CpuDescriptor {
        native::CpuDescriptor {
            ptr: self.start.cpu.ptr + (self.handle_size * index) as usize,
        }
    }

    pub(crate) fn gpu_descriptor_at(&self, index: u64) -> native::GpuDescriptor {
        native::GpuDescriptor {
            ptr: self.start.gpu.ptr + self.handle_size * index,
        }
    }

    pub(crate) unsafe fn destroy(&self) {
        self.raw.destroy();
    }
}

/// Slice of a descriptor heap, which is allocated for a pool.
/// Pools will create descriptor sets inside this slice.
#[derive(Debug)]
pub struct DescriptorHeapSlice {
    pub(crate) heap: native::DescriptorHeap, // Weak reference, owned by descriptor heap.
    pub(crate) start: DualHandle,
    pub(crate) handle_size: u64,
    pub(crate) range_allocator: RangeAllocator<u64>,
}

impl DescriptorHeapSlice {
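    /// Allocate `count` contiguous handles from this slice, returning
    /// `None` when the underlying range allocator is exhausted.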
    pub(crate) fn alloc_handles(&mut self, count: u64) -> Option<DualHandle> {
        self.range_allocator
            .allocate_range(count)
            .ok()
            .map(|range| DualHandle {
                cpu: native::CpuDescriptor {
                    ptr: self.start.cpu.ptr + (self.handle_size * range.start) as usize,
                },
                gpu: native::GpuDescriptor {
                    ptr: self.start.gpu.ptr + (self.handle_size * range.start) as u64,
                },
                size: count,
            })
    }

    /// Free handles previously given out by this `DescriptorHeapSlice`.
    /// Do not use this with handles not given out by this `DescriptorHeapSlice`.
    pub(crate) fn free_handles(&mut self, handle: DualHandle) {
        let start = (handle.gpu.ptr - self.start.gpu.ptr) / self.handle_size;
        let handle_range = start..start + handle.size;
        self.range_allocator.free_range(handle_range);
    }

    /// Clear the allocator.
    pub(crate) fn clear(&mut self) {
        self.range_allocator.reset();
    }
}

#[derive(Debug)]
pub struct DescriptorPool {
    pub(crate) heap_raw_sampler: native::DescriptorHeap,
    pub(crate) heap_srv_cbv_uav: DescriptorHeapSlice,
    pub(crate) pools: Vec<pso::DescriptorRangeDesc>,
    pub(crate) max_size: u64,
}
unsafe impl Send for DescriptorPool {}
unsafe impl Sync for DescriptorPool {}

impl pso::DescriptorPool<Backend> for DescriptorPool {
    unsafe fn allocate_one(
        &mut self,
        layout: &DescriptorSetLayout,
    ) -> Result<DescriptorSet, pso::AllocationError> {
        let mut binding_infos = Vec::new();
        let mut first_gpu_view = None;
        let mut num_samplers = 0;

        info!("allocate_one");
        for binding in &layout.bindings {
            // Add dummy bindings in case of out-of-range or sparse binding layout.
            while binding_infos.len() <= binding.binding as usize {
                binding_infos.push(DescriptorBindingInfo::default());
            }
            let content = DescriptorContent::from(binding.ty);
            debug!("\tbinding {:?} with content {:?}", binding, content);

            let (view_range, dynamic_descriptors) = if content.is_dynamic() {
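                // Dynamic buffers are bound as root descriptors, so they
                // consume no descriptor-heap space here; the GPU virtual
                // address is filled in later, when the set is written.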
                let descriptor = DynamicDescriptor {
                    content: content ^ DescriptorContent::DYNAMIC,
                    gpu_buffer_location: 0,
                };
                (None, vec![descriptor; binding.count])
            } else {
                if content.contains(DescriptorContent::SAMPLER) {
                    num_samplers += binding.count;
                }

                let view_range = if content.intersects(DescriptorContent::VIEW) {
                    let count = if content.contains(DescriptorContent::SRV | DescriptorContent::UAV)
                    {
                        2 * binding.count as u64
                    } else {
                        binding.count as u64
                    };
                    debug!("\tview handles: {}", count);
                    let handle = self
                        .heap_srv_cbv_uav
                        .alloc_handles(count)
                        .ok_or(pso::AllocationError::OutOfPoolMemory)?;
                    if first_gpu_view.is_none() {
                        first_gpu_view = Some(handle.gpu);
                    }
                    Some(DescriptorRange {
                        handle,
                        ty: binding.ty,
                        handle_size: self.heap_srv_cbv_uav.handle_size,
                    })
                } else {
                    None
                };

                (view_range, Vec::new())
            };

            binding_infos[binding.binding as usize] = DescriptorBindingInfo {
                count: binding.count as _,
                view_range,
                dynamic_descriptors,
                content,
            };
        }

        Ok(DescriptorSet {
            heap_srv_cbv_uav: self.heap_srv_cbv_uav.heap,
            heap_samplers: self.heap_raw_sampler,
            sampler_origins: vec![native::CpuDescriptor { ptr: 0 }; num_samplers]
                .into_boxed_slice(),
            binding_infos,
            first_gpu_sampler: None,
            first_gpu_view,
            raw_name: Vec::new(),
        })
    }

    unsafe fn free<I>(&mut self, descriptor_sets: I)
    where
        I: Iterator<Item = DescriptorSet>,
    {
        for descriptor_set in descriptor_sets {
            for binding_info in descriptor_set.binding_infos {
                if let Some(view_range) = binding_info.view_range {
                    if binding_info.content.intersects(DescriptorContent::VIEW) {
                        self.heap_srv_cbv_uav.free_handles(view_range.handle);
                    }
                }
            }
        }
    }

    unsafe fn reset(&mut self) {
        self.heap_srv_cbv_uav.clear();
    }
}

#[derive(Debug)]
pub struct QueryPool {
    pub(crate) raw: native::QueryHeap,
    pub(crate) ty: hal::query::Type,
}

unsafe impl Send for QueryPool {}
unsafe impl Sync for QueryPool {}