1 use std::borrow::Borrow;
2 use std::collections::{BTreeMap, VecDeque};
3 use std::ops::Range;
4 use std::{ffi, mem, ptr, slice};
5
6 use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode};
7 use smallvec::SmallVec;
8
9 use winapi::shared::minwindef::{FALSE, TRUE, UINT};
10 use winapi::shared::{dxgi, dxgi1_2, dxgi1_4, dxgiformat, dxgitype, windef, winerror};
11 use winapi::um::{d3d12, d3dcompiler, synchapi, winbase, winnt};
12 use winapi::Interface;
13
14 use auxil::spirv_cross_specialize_ast;
15 use hal::format::Aspects;
16 use hal::memory::Requirements;
17 use hal::pool::CommandPoolCreateFlags;
18 use hal::pso::VertexInputRate;
19 use hal::queue::{CommandQueue as _, QueueFamilyId};
20 use hal::range::RangeArg;
21 use hal::{
22 self,
23 buffer,
24 device as d,
25 format,
26 image,
27 memory,
28 pass,
29 pso,
30 query,
31 window as w,
32 };
33
34 use pool::{CommandPool, CommandPoolAllocator};
35 use range_alloc::RangeAllocator;
36 use root_constants::RootConstant;
37 use {
38 conv,
39 command as cmd,
40 descriptors_cpu,
41 resource as r,
42 root_constants,
43 window::{Surface, Swapchain},
44 Backend as B,
45 Device,
46 MemoryGroup,
47 MAX_VERTEX_BUFFERS,
48 NUM_HEAP_PROPERTIES,
49 QUEUE_FAMILIES,
50 };
51 use native::{
52 PipelineStateSubobject,
53 Subobject,
54 };
55
// Register space used for root constants.
const ROOT_CONSTANT_SPACE: u32 = 0;

// Memory type ids are packed into a bitfield, 3 bits (MEM_TYPE_MASK) per
// `MemoryGroup` slot, shifted by MEM_TYPE_SHIFT per group.
const MEM_TYPE_MASK: u64 = 0x7;
const MEM_TYPE_SHIFT: u64 = 3;

// Bit offsets of each memory group's slot within the packed field.
const MEM_TYPE_UNIVERSAL_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::Universal as u64;
const MEM_TYPE_BUFFER_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::BufferOnly as u64;
const MEM_TYPE_IMAGE_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::ImageOnly as u64;
const MEM_TYPE_TARGET_SHIFT: u64 = MEM_TYPE_SHIFT * MemoryGroup::TargetOnly as u64;

// Identity swizzle for SRV component mapping.
pub const IDENTITY_MAPPING: UINT = 0x1688; // D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING
68
69 /// Emit error during shader module creation. Used if we don't expect an error
70 /// but might panic due to an exception in SPIRV-Cross.
gen_unexpected_error(err: SpirvErrorCode) -> d::ShaderError71 fn gen_unexpected_error(err: SpirvErrorCode) -> d::ShaderError {
72 let msg = match err {
73 SpirvErrorCode::CompilationError(msg) => msg,
74 SpirvErrorCode::Unhandled => "Unexpected error".into(),
75 };
76 d::ShaderError::CompilationFailed(msg)
77 }
78
79 /// Emit error during shader module creation. Used if we execute an query command.
gen_query_error(err: SpirvErrorCode) -> d::ShaderError80 fn gen_query_error(err: SpirvErrorCode) -> d::ShaderError {
81 let msg = match err {
82 SpirvErrorCode::CompilationError(msg) => msg,
83 SpirvErrorCode::Unhandled => "Unknown query error".into(),
84 };
85 d::ShaderError::CompilationFailed(msg)
86 }
87
/// Everything needed to build a D3D12 view (RTV/DSV/SRV/UAV) of an image.
#[derive(Clone, Debug)]
pub(crate) struct ViewInfo {
    /// The underlying D3D12 resource the view refers to.
    pub(crate) resource: native::Resource,
    /// Image kind; used for layer-bounds checks, sample count, and 3D depth.
    pub(crate) kind: image::Kind,
    /// View capabilities (e.g. cube compatibility) of the source image.
    pub(crate) caps: image::ViewCapabilities,
    /// Dimensionality the view should expose (1D/2D/3D/cube, arrayed or not).
    pub(crate) view_kind: image::ViewKind,
    /// DXGI format used by the view.
    pub(crate) format: dxgiformat::DXGI_FORMAT,
    /// Shader 4-component mapping (swizzle); consumed by SRV creation.
    pub(crate) component_mapping: UINT,
    /// Mip-level and array-layer range the view covers.
    pub(crate) range: image::SubresourceRange,
}
98
/// The kind of indirect command signature to create.
pub(crate) enum CommandSignature {
    Draw,
    DrawIndexed,
    Dispatch,
}
104
/// Compile a single shader entry point from a HLSL text shader
///
/// On success returns the compiled bytecode blob; on failure returns
/// `CompilationFailed` carrying the compiler's error text.
pub(crate) fn compile_shader(
    stage: pso::Stage,
    shader_model: hlsl::ShaderModel,
    entry: &str,
    code: &[u8],
) -> Result<native::Blob, d::ShaderError> {
    // Build the D3D target profile string, e.g. "vs_5_1". The embedded NUL
    // is required because the string is passed to D3DCompile as a C string.
    let stage_to_str = |stage, shader_model| {
        let stage = match stage {
            pso::Stage::Vertex => "vs",
            pso::Stage::Fragment => "ps",
            pso::Stage::Compute => "cs",
            _ => unimplemented!(),
        };

        let model = match shader_model {
            hlsl::ShaderModel::V5_0 => "5_0",
            hlsl::ShaderModel::V5_1 => "5_1",
            hlsl::ShaderModel::V6_0 => "6_0",
            _ => unimplemented!(),
        };

        format!("{}_{}\0", stage, model)
    };

    let mut shader_data = native::Blob::null();
    let mut error = native::Blob::null();
    let entry = ffi::CString::new(entry).unwrap();
    let hr = unsafe {
        d3dcompiler::D3DCompile(
            code.as_ptr() as *const _,
            code.len(),
            ptr::null(),     // source name (diagnostics only)
            ptr::null(),     // no preprocessor defines
            ptr::null_mut(), // no #include handler
            entry.as_ptr() as *const _,
            stage_to_str(stage, shader_model).as_ptr() as *const i8,
            1, // Flags1; NOTE(review): 1 == D3DCOMPILE_DEBUG — confirm intended
            0, // Flags2 (effect files only)
            shader_data.mut_void() as *mut *mut _,
            error.mut_void() as *mut *mut _,
        )
    };
    if !winerror::SUCCEEDED(hr) {
        error!("D3DCompile error {:x}", hr);
        // NOTE(review): assumes D3DCompile produced an error blob on every
        // failure path; a null blob here would dereference null — confirm.
        let message = unsafe {
            let pointer = error.GetBufferPointer();
            let size = error.GetBufferSize();
            let slice = slice::from_raw_parts(pointer as *const u8, size as usize);
            String::from_utf8_lossy(slice).into_owned()
        };
        unsafe {
            error.destroy();
        }
        Err(d::ShaderError::CompilationFailed(message))
    } else {
        Ok(shader_data)
    }
}
164
/// Pipeline-state stream for `ID3D12Device2::CreatePipelineState`.
///
/// Each field is a tagged subobject; `#[repr(C)]` is required so the field
/// order and alignment match the layout D3D12 parses from the stream.
#[repr(C)]
struct GraphicsPipelineStateSubobjectStream {
    root_signature: PipelineStateSubobject<*mut d3d12::ID3D12RootSignature>,
    vs: PipelineStateSubobject<d3d12::D3D12_SHADER_BYTECODE>,
    ps: PipelineStateSubobject<d3d12::D3D12_SHADER_BYTECODE>,
    ds: PipelineStateSubobject<d3d12::D3D12_SHADER_BYTECODE>,
    hs: PipelineStateSubobject<d3d12::D3D12_SHADER_BYTECODE>,
    gs: PipelineStateSubobject<d3d12::D3D12_SHADER_BYTECODE>,
    stream_output: PipelineStateSubobject<d3d12::D3D12_STREAM_OUTPUT_DESC>,
    blend: PipelineStateSubobject<d3d12::D3D12_BLEND_DESC>,
    sample_mask: PipelineStateSubobject<UINT>,
    rasterizer: PipelineStateSubobject<d3d12::D3D12_RASTERIZER_DESC>,
    // Note: DESC1 (not DESC) — carries the depth-bounds-test flag.
    depth_stencil: PipelineStateSubobject<d3d12::D3D12_DEPTH_STENCIL_DESC1>,
    input_layout: PipelineStateSubobject<d3d12::D3D12_INPUT_LAYOUT_DESC>,
    ib_strip_cut_value: PipelineStateSubobject<d3d12::D3D12_INDEX_BUFFER_STRIP_CUT_VALUE>,
    primitive_topology: PipelineStateSubobject<d3d12::D3D12_PRIMITIVE_TOPOLOGY_TYPE>,
    render_target_formats: PipelineStateSubobject<d3d12::D3D12_RT_FORMAT_ARRAY>,
    depth_stencil_format: PipelineStateSubobject<dxgiformat::DXGI_FORMAT>,
    sample_desc: PipelineStateSubobject<dxgitype::DXGI_SAMPLE_DESC>,
    node_mask: PipelineStateSubobject<UINT>,
    cached_pso: PipelineStateSubobject<d3d12::D3D12_CACHED_PIPELINE_STATE>,
    flags: PipelineStateSubobject<d3d12::D3D12_PIPELINE_STATE_FLAGS>,
}
188
impl GraphicsPipelineStateSubobjectStream {
    /// Build the subobject stream from a classic graphics PSO desc,
    /// additionally carrying the depth-bounds-test flag which the classic
    /// `D3D12_GRAPHICS_PIPELINE_STATE_DESC` cannot express.
    fn new(
        pso_desc: &d3d12::D3D12_GRAPHICS_PIPELINE_STATE_DESC,
        depth_bounds_test_enable: bool,
    ) -> Self {
        GraphicsPipelineStateSubobjectStream {
            root_signature: PipelineStateSubobject::new(
                Subobject::RootSignature,
                pso_desc.pRootSignature,
            ),
            vs: PipelineStateSubobject::new(Subobject::VS, pso_desc.VS),
            ps: PipelineStateSubobject::new(Subobject::PS, pso_desc.PS),
            ds: PipelineStateSubobject::new(Subobject::DS, pso_desc.DS),
            hs: PipelineStateSubobject::new(Subobject::HS, pso_desc.HS),
            gs: PipelineStateSubobject::new(Subobject::GS, pso_desc.GS),
            stream_output: PipelineStateSubobject::new(
                Subobject::StreamOutput,
                pso_desc.StreamOutput,
            ),
            blend: PipelineStateSubobject::new(Subobject::Blend, pso_desc.BlendState),
            sample_mask: PipelineStateSubobject::new(Subobject::SampleMask, pso_desc.SampleMask),
            rasterizer: PipelineStateSubobject::new(
                Subobject::Rasterizer,
                pso_desc.RasterizerState,
            ),
            // Widen DESC -> DESC1, splicing in the depth-bounds flag.
            depth_stencil: PipelineStateSubobject::new(
                Subobject::DepthStencil1,
                d3d12::D3D12_DEPTH_STENCIL_DESC1 {
                    DepthEnable: pso_desc.DepthStencilState.DepthEnable,
                    DepthWriteMask: pso_desc.DepthStencilState.DepthWriteMask,
                    DepthFunc: pso_desc.DepthStencilState.DepthFunc,
                    StencilEnable: pso_desc.DepthStencilState.StencilEnable,
                    StencilReadMask: pso_desc.DepthStencilState.StencilReadMask,
                    StencilWriteMask: pso_desc.DepthStencilState.StencilWriteMask,
                    FrontFace: pso_desc.DepthStencilState.FrontFace,
                    BackFace: pso_desc.DepthStencilState.BackFace,
                    DepthBoundsTestEnable: depth_bounds_test_enable as _,
                },
            ),
            input_layout: PipelineStateSubobject::new(Subobject::InputLayout, pso_desc.InputLayout),
            ib_strip_cut_value: PipelineStateSubobject::new(
                Subobject::IBStripCut,
                pso_desc.IBStripCutValue,
            ),
            primitive_topology: PipelineStateSubobject::new(
                Subobject::PrimitiveTopology,
                pso_desc.PrimitiveTopologyType,
            ),
            render_target_formats: PipelineStateSubobject::new(
                Subobject::RTFormats,
                d3d12::D3D12_RT_FORMAT_ARRAY {
                    RTFormats: pso_desc.RTVFormats,
                    NumRenderTargets: pso_desc.NumRenderTargets,
                },
            ),
            depth_stencil_format: PipelineStateSubobject::new(
                Subobject::DSFormat,
                pso_desc.DSVFormat,
            ),
            sample_desc: PipelineStateSubobject::new(Subobject::SampleDesc, pso_desc.SampleDesc),
            node_mask: PipelineStateSubobject::new(Subobject::NodeMask, pso_desc.NodeMask),
            cached_pso: PipelineStateSubobject::new(Subobject::CachedPSO, pso_desc.CachedPSO),
            flags: PipelineStateSubobject::new(Subobject::Flags, pso_desc.Flags),
        }
    }
}
255
256 impl Device {
parse_spirv(raw_data: &[u32]) -> Result<spirv::Ast<hlsl::Target>, d::ShaderError>257 fn parse_spirv(raw_data: &[u32]) -> Result<spirv::Ast<hlsl::Target>, d::ShaderError> {
258 let module = spirv::Module::from_words(raw_data);
259
260 spirv::Ast::parse(&module).map_err(|err| {
261 let msg = match err {
262 SpirvErrorCode::CompilationError(msg) => msg,
263 SpirvErrorCode::Unhandled => "Unknown parsing error".into(),
264 };
265 d::ShaderError::CompilationFailed(msg)
266 })
267 }
268
patch_spirv_resources( ast: &mut spirv::Ast<hlsl::Target>, layout: Option<&r::PipelineLayout>, ) -> Result<(), d::ShaderError>269 fn patch_spirv_resources(
270 ast: &mut spirv::Ast<hlsl::Target>,
271 layout: Option<&r::PipelineLayout>,
272 ) -> Result<(), d::ShaderError> {
273 // Move the descriptor sets away to yield for the root constants at "space0".
274 let space_offset = match layout {
275 Some(layout) if !layout.constants.is_empty() => 1,
276 _ => return Ok(()),
277 };
278
279 let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?;
280 for image in &shader_resources.separate_images {
281 let set = ast
282 .get_decoration(image.id, spirv::Decoration::DescriptorSet)
283 .map_err(gen_query_error)?;
284 ast.set_decoration(
285 image.id,
286 spirv::Decoration::DescriptorSet,
287 space_offset + set,
288 )
289 .map_err(gen_unexpected_error)?;
290 }
291
292 for uniform_buffer in &shader_resources.uniform_buffers {
293 let set = ast
294 .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet)
295 .map_err(gen_query_error)?;
296 ast.set_decoration(
297 uniform_buffer.id,
298 spirv::Decoration::DescriptorSet,
299 space_offset + set,
300 )
301 .map_err(gen_unexpected_error)?;
302 }
303
304 for storage_buffer in &shader_resources.storage_buffers {
305 let set = ast
306 .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet)
307 .map_err(gen_query_error)?;
308 ast.set_decoration(
309 storage_buffer.id,
310 spirv::Decoration::DescriptorSet,
311 space_offset + set,
312 )
313 .map_err(gen_unexpected_error)?;
314 }
315
316 for image in &shader_resources.storage_images {
317 let set = ast
318 .get_decoration(image.id, spirv::Decoration::DescriptorSet)
319 .map_err(gen_query_error)?;
320 ast.set_decoration(
321 image.id,
322 spirv::Decoration::DescriptorSet,
323 space_offset + set,
324 )
325 .map_err(gen_unexpected_error)?;
326 }
327
328 for sampler in &shader_resources.separate_samplers {
329 let set = ast
330 .get_decoration(sampler.id, spirv::Decoration::DescriptorSet)
331 .map_err(gen_query_error)?;
332 ast.set_decoration(
333 sampler.id,
334 spirv::Decoration::DescriptorSet,
335 space_offset + set,
336 )
337 .map_err(gen_unexpected_error)?;
338 }
339
340 for image in &shader_resources.sampled_images {
341 let set = ast
342 .get_decoration(image.id, spirv::Decoration::DescriptorSet)
343 .map_err(gen_query_error)?;
344 ast.set_decoration(
345 image.id,
346 spirv::Decoration::DescriptorSet,
347 space_offset + set,
348 )
349 .map_err(gen_unexpected_error)?;
350 }
351
352 for input in &shader_resources.subpass_inputs {
353 let set = ast
354 .get_decoration(input.id, spirv::Decoration::DescriptorSet)
355 .map_err(gen_query_error)?;
356 ast.set_decoration(
357 input.id,
358 spirv::Decoration::DescriptorSet,
359 space_offset + set,
360 )
361 .map_err(gen_unexpected_error)?;
362 }
363
364 // TODO: other resources
365
366 Ok(())
367 }
368
translate_spirv( ast: &mut spirv::Ast<hlsl::Target>, shader_model: hlsl::ShaderModel, layout: &r::PipelineLayout, stage: pso::Stage, ) -> Result<String, d::ShaderError>369 fn translate_spirv(
370 ast: &mut spirv::Ast<hlsl::Target>,
371 shader_model: hlsl::ShaderModel,
372 layout: &r::PipelineLayout,
373 stage: pso::Stage,
374 ) -> Result<String, d::ShaderError> {
375 let mut compile_options = hlsl::CompilerOptions::default();
376 compile_options.shader_model = shader_model;
377 compile_options.vertex.invert_y = true;
378
379 let stage_flag = stage.into();
380 let root_constant_layout = layout
381 .constants
382 .iter()
383 .filter_map(|constant| {
384 if constant.stages.contains(stage_flag) {
385 Some(hlsl::RootConstant {
386 start: constant.range.start * 4,
387 end: constant.range.end * 4,
388 binding: constant.range.start,
389 space: 0,
390 })
391 } else {
392 None
393 }
394 })
395 .collect();
396 ast.set_compiler_options(&compile_options)
397 .map_err(gen_unexpected_error)?;
398 ast.set_root_constant_layout(root_constant_layout)
399 .map_err(gen_unexpected_error)?;
400 ast.compile().map_err(|err| {
401 let msg = match err {
402 SpirvErrorCode::CompilationError(msg) => msg,
403 SpirvErrorCode::Unhandled => "Unknown compile error".into(),
404 };
405 d::ShaderError::CompilationFailed(msg)
406 })
407 }
408
    // Extract entry point from shader module on pipeline creation.
    // Returns compiled shader blob and bool to indicate if the shader should be
    // destroyed after pipeline creation
    fn extract_entry_point(
        stage: pso::Stage,
        source: &pso::EntryPoint<B>,
        layout: &r::PipelineLayout,
    ) -> Result<(native::Blob, bool), d::ShaderError> {
        match *source.module {
            r::ShaderModule::Compiled(ref shaders) => {
                // TODO: do we need to check for specialization constants?
                // Use precompiled shader, ignore specialization or layout.
                // `false`: the blob is owned by the module, don't destroy it.
                shaders
                    .get(source.entry)
                    .map(|src| (*src, false))
                    .ok_or(d::ShaderError::MissingEntryPoint(source.entry.into()))
            }
            r::ShaderModule::Spirv(ref raw_data) => {
                let mut ast = Self::parse_spirv(raw_data)?;
                spirv_cross_specialize_ast(&mut ast, &source.specialization)?;
                // Shift descriptor sets up to make room for root constants.
                Self::patch_spirv_resources(&mut ast, Some(layout))?;

                let shader_model = hlsl::ShaderModel::V5_1;
                let shader_code = Self::translate_spirv(&mut ast, shader_model, layout, stage)?;
                debug!("SPIRV-Cross generated shader:\n{}", shader_code);

                // SPIRV-Cross may rename the entry point (e.g. to dodge HLSL
                // keyword clashes), so look it up by its cleansed name.
                let real_name = ast
                    .get_cleansed_entry_point_name(source.entry, conv::map_stage(stage))
                    .map_err(gen_query_error)?;
                // TODO: opt: don't query *all* entry points.
                let entry_points = ast.get_entry_points().map_err(gen_query_error)?;
                entry_points
                    .iter()
                    .find(|entry_point| entry_point.name == real_name)
                    .ok_or(d::ShaderError::MissingEntryPoint(source.entry.into()))
                    .and_then(|entry_point| {
                        // Compile with the stage the entry point itself reports.
                        let stage = conv::map_execution_model(entry_point.execution_model);
                        let shader = compile_shader(
                            stage,
                            shader_model,
                            &entry_point.name,
                            shader_code.as_bytes(),
                        )?;
                        // `true`: blob compiled here, destroy after pipeline creation.
                        Ok((shader, true))
                    })
            }
        }
    }
457
458 /// Create a shader module from HLSL with a single entry point
create_shader_module_from_source( &self, stage: pso::Stage, hlsl_entry: &str, entry_point: &str, code: &[u8], ) -> Result<r::ShaderModule, d::ShaderError>459 pub fn create_shader_module_from_source(
460 &self,
461 stage: pso::Stage,
462 hlsl_entry: &str,
463 entry_point: &str,
464 code: &[u8],
465 ) -> Result<r::ShaderModule, d::ShaderError> {
466 let mut shader_map = BTreeMap::new();
467 let blob = compile_shader(stage, hlsl::ShaderModel::V5_1, hlsl_entry, code)?;
468 shader_map.insert(entry_point.into(), blob);
469 Ok(r::ShaderModule::Compiled(shader_map))
470 }
471
create_command_signature( device: native::Device, ty: CommandSignature, ) -> native::CommandSignature472 pub(crate) fn create_command_signature(
473 device: native::Device,
474 ty: CommandSignature,
475 ) -> native::CommandSignature {
476 let (arg, stride) = match ty {
477 CommandSignature::Draw => (native::IndirectArgument::draw(), 16),
478 CommandSignature::DrawIndexed => (native::IndirectArgument::draw_indexed(), 20),
479 CommandSignature::Dispatch => (native::IndirectArgument::dispatch(), 12),
480 };
481
482 let (signature, hr) =
483 device.create_command_signature(native::RootSignature::null(), &[arg], stride, 0);
484
485 if !winerror::SUCCEEDED(hr) {
486 error!("error on command signature creation: {:x}", hr);
487 }
488 signature
489 }
490
create_descriptor_heap_impl( device: native::Device, heap_type: native::DescriptorHeapType, shader_visible: bool, capacity: usize, ) -> r::DescriptorHeap491 pub(crate) fn create_descriptor_heap_impl(
492 device: native::Device,
493 heap_type: native::DescriptorHeapType,
494 shader_visible: bool,
495 capacity: usize,
496 ) -> r::DescriptorHeap {
497 assert_ne!(capacity, 0);
498
499 let (heap, _hr) = device.create_descriptor_heap(
500 capacity as _,
501 heap_type,
502 if shader_visible {
503 native::DescriptorHeapFlags::SHADER_VISIBLE
504 } else {
505 native::DescriptorHeapFlags::empty()
506 },
507 0,
508 );
509
510 let descriptor_size = device.get_descriptor_increment_size(heap_type);
511 let cpu_handle = heap.start_cpu_descriptor();
512 let gpu_handle = heap.start_gpu_descriptor();
513
514 let range_allocator = RangeAllocator::new(0 .. (capacity as u64));
515
516 r::DescriptorHeap {
517 raw: heap,
518 handle_size: descriptor_size as _,
519 total_handles: capacity as _,
520 start: r::DualHandle {
521 cpu: cpu_handle,
522 gpu: gpu_handle,
523 size: 0,
524 },
525 range_allocator,
526 }
527 }
528
view_image_as_render_target_impl( device: native::Device, handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, info: ViewInfo, ) -> Result<(), image::ViewError>529 pub(crate) fn view_image_as_render_target_impl(
530 device: native::Device,
531 handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE,
532 info: ViewInfo,
533 ) -> Result<(), image::ViewError> {
534 #![allow(non_snake_case)]
535
536 let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC {
537 Format: info.format,
538 ViewDimension: 0,
539 u: unsafe { mem::zeroed() },
540 };
541
542 let MipSlice = info.range.levels.start as _;
543 let FirstArraySlice = info.range.layers.start as _;
544 let ArraySize = (info.range.layers.end - info.range.layers.start) as _;
545 let is_msaa = info.kind.num_samples() > 1;
546 if info.range.levels.start + 1 != info.range.levels.end {
547 return Err(image::ViewError::Level(info.range.levels.start));
548 }
549 if info.range.layers.end > info.kind.num_layers() {
550 return Err(image::ViewError::Layer(image::LayerError::OutOfBounds(
551 info.range.layers,
552 )));
553 }
554
555 match info.view_kind {
556 image::ViewKind::D1 => {
557 assert_eq!(info.range.layers, 0 .. 1);
558 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1D;
559 *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_RTV { MipSlice }
560 }
561 image::ViewKind::D1Array => {
562 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1DARRAY;
563 *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_RTV {
564 MipSlice,
565 FirstArraySlice,
566 ArraySize,
567 }
568 }
569 image::ViewKind::D2 if is_msaa => {
570 assert_eq!(info.range.layers, 0 .. 1);
571 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMS;
572 *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_RTV {
573 UnusedField_NothingToDefine: 0,
574 }
575 }
576 image::ViewKind::D2 => {
577 assert_eq!(info.range.layers, 0 .. 1);
578 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2D;
579 *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_RTV {
580 MipSlice,
581 PlaneSlice: 0, //TODO
582 }
583 }
584 image::ViewKind::D2Array if is_msaa => {
585 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY;
586 *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_RTV {
587 FirstArraySlice,
588 ArraySize,
589 }
590 }
591 image::ViewKind::D2Array => {
592 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
593 *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_RTV {
594 MipSlice,
595 FirstArraySlice,
596 ArraySize,
597 PlaneSlice: 0, //TODO
598 }
599 }
600 image::ViewKind::D3 => {
601 assert_eq!(info.range.layers, 0 .. 1);
602 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE3D;
603 *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_RTV {
604 MipSlice,
605 FirstWSlice: 0,
606 WSize: info.kind.extent().depth as _,
607 }
608 }
609 image::ViewKind::Cube | image::ViewKind::CubeArray => {
610 desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
611 //TODO: double-check if any *6 are needed
612 *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_RTV {
613 MipSlice,
614 FirstArraySlice,
615 ArraySize,
616 PlaneSlice: 0, //TODO
617 }
618 }
619 };
620
621 unsafe {
622 device.CreateRenderTargetView(info.resource.as_mut_ptr(), &desc, handle);
623 }
624
625 Ok(())
626 }
627
view_image_as_render_target( &self, info: ViewInfo, ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError>628 fn view_image_as_render_target(
629 &self,
630 info: ViewInfo,
631 ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError> {
632 let handle = self.rtv_pool.lock().unwrap().alloc_handle();
633 Self::view_image_as_render_target_impl(self.raw, handle, info).map(|_| handle)
634 }
635
view_image_as_depth_stencil_impl( device: native::Device, handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, info: ViewInfo, ) -> Result<(), image::ViewError>636 pub(crate) fn view_image_as_depth_stencil_impl(
637 device: native::Device,
638 handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE,
639 info: ViewInfo,
640 ) -> Result<(), image::ViewError> {
641 #![allow(non_snake_case)]
642
643 let mut desc = d3d12::D3D12_DEPTH_STENCIL_VIEW_DESC {
644 Format: info.format,
645 ViewDimension: 0,
646 Flags: 0,
647 u: unsafe { mem::zeroed() },
648 };
649
650 let MipSlice = info.range.levels.start as _;
651 let FirstArraySlice = info.range.layers.start as _;
652 let ArraySize = (info.range.layers.end - info.range.layers.start) as _;
653 let is_msaa = info.kind.num_samples() > 1;
654 if info.range.levels.start + 1 != info.range.levels.end {
655 return Err(image::ViewError::Level(info.range.levels.start));
656 }
657 if info.range.layers.end > info.kind.num_layers() {
658 return Err(image::ViewError::Layer(image::LayerError::OutOfBounds(
659 info.range.layers,
660 )));
661 }
662
663 match info.view_kind {
664 image::ViewKind::D1 => {
665 assert_eq!(info.range.layers, 0 .. 1);
666 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1D;
667 *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_DSV { MipSlice }
668 }
669 image::ViewKind::D1Array => {
670 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1DARRAY;
671 *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_DSV {
672 MipSlice,
673 FirstArraySlice,
674 ArraySize,
675 }
676 }
677 image::ViewKind::D2 if is_msaa => {
678 assert_eq!(info.range.layers, 0 .. 1);
679 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMS;
680 *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_DSV {
681 UnusedField_NothingToDefine: 0,
682 }
683 }
684 image::ViewKind::D2 => {
685 assert_eq!(info.range.layers, 0 .. 1);
686 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2D;
687 *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_DSV { MipSlice }
688 }
689 image::ViewKind::D2Array if is_msaa => {
690 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY;
691 *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_DSV {
692 FirstArraySlice,
693 ArraySize,
694 }
695 }
696 image::ViewKind::D2Array => {
697 desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
698 *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_DSV {
699 MipSlice,
700 FirstArraySlice,
701 ArraySize,
702 }
703 }
704 image::ViewKind::D3 | image::ViewKind::Cube | image::ViewKind::CubeArray => unimplemented!(),
705 };
706
707 unsafe {
708 device.CreateDepthStencilView(info.resource.as_mut_ptr(), &desc, handle);
709 }
710
711 Ok(())
712 }
713
view_image_as_depth_stencil( &self, info: ViewInfo, ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError>714 fn view_image_as_depth_stencil(
715 &self,
716 info: ViewInfo,
717 ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError> {
718 let handle = self.dsv_pool.lock().unwrap().alloc_handle();
719 Self::view_image_as_depth_stencil_impl(self.raw, handle, info).map(|_| handle)
720 }
721
build_image_as_shader_resource_desc( info: &ViewInfo, ) -> Result<d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC, image::ViewError>722 pub(crate) fn build_image_as_shader_resource_desc(
723 info: &ViewInfo,
724 ) -> Result<d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC, image::ViewError> {
725 #![allow(non_snake_case)]
726
727 let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC {
728 Format: info.format,
729 ViewDimension: 0,
730 Shader4ComponentMapping: info.component_mapping,
731 u: unsafe { mem::zeroed() },
732 };
733
734 let MostDetailedMip = info.range.levels.start as _;
735 let MipLevels = (info.range.levels.end - info.range.levels.start) as _;
736 let FirstArraySlice = info.range.layers.start as _;
737 let ArraySize = (info.range.layers.end - info.range.layers.start) as _;
738
739 if info.range.layers.end > info.kind.num_layers() {
740 return Err(image::ViewError::Layer(image::LayerError::OutOfBounds(
741 info.range.layers.clone(),
742 )));
743 }
744 let is_msaa = info.kind.num_samples() > 1;
745 let is_cube = info.caps.contains(image::ViewCapabilities::KIND_CUBE);
746
747 match info.view_kind {
748 image::ViewKind::D1 => {
749 assert_eq!(info.range.layers, 0 .. 1);
750 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1D;
751 *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_SRV {
752 MostDetailedMip,
753 MipLevels,
754 ResourceMinLODClamp: 0.0,
755 }
756 }
757 image::ViewKind::D1Array => {
758 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1DARRAY;
759 *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_SRV {
760 MostDetailedMip,
761 MipLevels,
762 FirstArraySlice,
763 ArraySize,
764 ResourceMinLODClamp: 0.0,
765 }
766 }
767 image::ViewKind::D2 if is_msaa => {
768 assert_eq!(info.range.layers, 0 .. 1);
769 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMS;
770 *unsafe { desc.u.Texture2DMS_mut() } = d3d12::D3D12_TEX2DMS_SRV {
771 UnusedField_NothingToDefine: 0,
772 }
773 }
774 image::ViewKind::D2 => {
775 assert_eq!(info.range.layers, 0 .. 1);
776 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2D;
777 *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_SRV {
778 MostDetailedMip,
779 MipLevels,
780 PlaneSlice: 0, //TODO
781 ResourceMinLODClamp: 0.0,
782 }
783 }
784 image::ViewKind::D2Array if is_msaa => {
785 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY;
786 *unsafe { desc.u.Texture2DMSArray_mut() } = d3d12::D3D12_TEX2DMS_ARRAY_SRV {
787 FirstArraySlice,
788 ArraySize,
789 }
790 }
791 image::ViewKind::D2Array => {
792 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
793 *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_SRV {
794 MostDetailedMip,
795 MipLevels,
796 FirstArraySlice,
797 ArraySize,
798 PlaneSlice: 0, //TODO
799 ResourceMinLODClamp: 0.0,
800 }
801 }
802 image::ViewKind::D3 => {
803 assert_eq!(info.range.layers, 0 .. 1);
804 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE3D;
805 *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_SRV {
806 MostDetailedMip,
807 MipLevels,
808 ResourceMinLODClamp: 0.0,
809 }
810 }
811 image::ViewKind::Cube if is_cube => {
812 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBE;
813 *unsafe { desc.u.TextureCube_mut() } = d3d12::D3D12_TEXCUBE_SRV {
814 MostDetailedMip,
815 MipLevels,
816 ResourceMinLODClamp: 0.0,
817 }
818 }
819 image::ViewKind::CubeArray if is_cube => {
820 assert_eq!(0, ArraySize % 6);
821 desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
822 *unsafe { desc.u.TextureCubeArray_mut() } = d3d12::D3D12_TEXCUBE_ARRAY_SRV {
823 MostDetailedMip,
824 MipLevels,
825 First2DArrayFace: FirstArraySlice,
826 NumCubes: ArraySize / 6,
827 ResourceMinLODClamp: 0.0,
828 }
829 }
830 image::ViewKind::Cube | image::ViewKind::CubeArray => {
831 error!(
832 "Cube views are not supported for the image, kind: {:?}",
833 info.kind
834 );
835 return Err(image::ViewError::BadKind(info.view_kind));
836 }
837 }
838
839 Ok(desc)
840 }
841
view_image_as_shader_resource( &self, mut info: ViewInfo, ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError>842 fn view_image_as_shader_resource(
843 &self,
844 mut info: ViewInfo,
845 ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError> {
846 #![allow(non_snake_case)]
847
848 // Depth-stencil formats can't be used for SRVs.
849 info.format = match info.format {
850 dxgiformat::DXGI_FORMAT_D16_UNORM => dxgiformat::DXGI_FORMAT_R16_UNORM,
851 dxgiformat::DXGI_FORMAT_D32_FLOAT => dxgiformat::DXGI_FORMAT_R32_FLOAT,
852 format => format,
853 };
854
855 let desc = Self::build_image_as_shader_resource_desc(&info)?;
856 let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
857 unsafe {
858 self.raw
859 .CreateShaderResourceView(info.resource.as_mut_ptr(), &desc, handle);
860 }
861
862 Ok(handle)
863 }
864
view_image_as_storage( &self, info: ViewInfo, ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError>865 fn view_image_as_storage(
866 &self,
867 info: ViewInfo,
868 ) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewError> {
869 #![allow(non_snake_case)]
870 assert_eq!(info.range.levels.start + 1, info.range.levels.end);
871
872 let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC {
873 Format: info.format,
874 ViewDimension: 0,
875 u: unsafe { mem::zeroed() },
876 };
877
878 let MipSlice = info.range.levels.start as _;
879 let FirstArraySlice = info.range.layers.start as _;
880 let ArraySize = (info.range.layers.end - info.range.layers.start) as _;
881
882 if info.range.layers.end > info.kind.num_layers() {
883 return Err(image::ViewError::Layer(image::LayerError::OutOfBounds(
884 info.range.layers,
885 )));
886 }
887 if info.kind.num_samples() > 1 {
888 error!("MSAA images can't be viewed as UAV");
889 return Err(image::ViewError::Unsupported);
890 }
891
892 match info.view_kind {
893 image::ViewKind::D1 => {
894 assert_eq!(info.range.layers, 0 .. 1);
895 desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1D;
896 *unsafe { desc.u.Texture1D_mut() } = d3d12::D3D12_TEX1D_UAV { MipSlice }
897 }
898 image::ViewKind::D1Array => {
899 desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1DARRAY;
900 *unsafe { desc.u.Texture1DArray_mut() } = d3d12::D3D12_TEX1D_ARRAY_UAV {
901 MipSlice,
902 FirstArraySlice,
903 ArraySize,
904 }
905 }
906 image::ViewKind::D2 => {
907 assert_eq!(info.range.layers, 0 .. 1);
908 desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2D;
909 *unsafe { desc.u.Texture2D_mut() } = d3d12::D3D12_TEX2D_UAV {
910 MipSlice,
911 PlaneSlice: 0, //TODO
912 }
913 }
914 image::ViewKind::D2Array => {
915 desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
916 *unsafe { desc.u.Texture2DArray_mut() } = d3d12::D3D12_TEX2D_ARRAY_UAV {
917 MipSlice,
918 FirstArraySlice,
919 ArraySize,
920 PlaneSlice: 0, //TODO
921 }
922 }
923 image::ViewKind::D3 => {
924 assert_eq!(info.range.layers, 0 .. 1);
925 desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE3D;
926 *unsafe { desc.u.Texture3D_mut() } = d3d12::D3D12_TEX3D_UAV {
927 MipSlice,
928 FirstWSlice: 0,
929 WSize: info.kind.extent().depth as _,
930 }
931 }
932 image::ViewKind::Cube | image::ViewKind::CubeArray => {
933 error!("Cubic images can't be viewed as UAV");
934 return Err(image::ViewError::Unsupported);
935 }
936 }
937
938 let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
939 unsafe {
940 self.raw.CreateUnorderedAccessView(
941 info.resource.as_mut_ptr(),
942 ptr::null_mut(),
943 &desc,
944 handle,
945 );
946 }
947
948 Ok(handle)
949 }
950
create_raw_fence(&self, signalled: bool) -> native::Fence951 pub(crate) fn create_raw_fence(&self, signalled: bool) -> native::Fence {
952 let mut handle = native::Fence::null();
953 assert_eq!(winerror::S_OK, unsafe {
954 self.raw.CreateFence(
955 if signalled { 1 } else { 0 },
956 d3d12::D3D12_FENCE_FLAG_NONE,
957 &d3d12::ID3D12Fence::uuidof(),
958 handle.mut_void(),
959 )
960 });
961 handle
962 }
963
    /// Creates a DXGI swapchain (flip-discard model) for the given window and
    /// upgrades it to `IDXGISwapChain3`, returning it together with the
    /// non-sRGB format the backbuffers were created with.
    ///
    /// NOTE(review): the HRESULTs of `CreateSwapChainForHwnd` and of the
    /// interface cast are only logged; on failure execution continues with a
    /// potentially null swapchain pointer — TODO: map these into a proper
    /// `w::CreationError` and return `Err` instead.
    pub(crate) fn create_swapchain_impl(
        &self,
        config: &w::SwapchainConfig,
        window_handle: windef::HWND,
        factory: native::WeakPtr<dxgi1_4::IDXGIFactory4>,
    ) -> Result<
        (
            native::WeakPtr<dxgi1_4::IDXGISwapChain3>,
            dxgiformat::DXGI_FORMAT,
        ),
        w::CreationError,
    > {
        let mut swap_chain1 = native::WeakPtr::<dxgi1_2::IDXGISwapChain1>::null();

        //TODO: proper error type?
        // Flip-model swapchains must be created with a non-sRGB format;
        // sRGB-ness is applied through the render target views instead.
        let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap();

        // TODO: double-check values
        let desc = dxgi1_2::DXGI_SWAP_CHAIN_DESC1 {
            AlphaMode: dxgi1_2::DXGI_ALPHA_MODE_IGNORE,
            BufferCount: config.image_count,
            Width: config.extent.width,
            Height: config.extent.height,
            Format: non_srgb_format,
            Flags: 0,
            BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT,
            SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
                Count: 1,
                Quality: 0,
            },
            Scaling: dxgi1_2::DXGI_SCALING_STRETCH,
            Stereo: FALSE,
            SwapEffect: dxgi::DXGI_SWAP_EFFECT_FLIP_DISCARD,
        };

        unsafe {
            let hr = factory.CreateSwapChainForHwnd(
                self.present_queue.as_mut_ptr() as *mut _,
                window_handle,
                &desc,
                ptr::null(),
                ptr::null_mut(),
                swap_chain1.mut_void() as *mut *mut _,
            );

            if !winerror::SUCCEEDED(hr) {
                error!("error on swapchain creation 0x{:x}", hr);
            }

            let (swap_chain3, hr3) = swap_chain1.cast::<dxgi1_4::IDXGISwapChain3>();
            if !winerror::SUCCEEDED(hr3) {
                error!("error on swapchain cast 0x{:x}", hr3);
            }

            // Release the intermediate IDXGISwapChain1; the `cast` above
            // acquired its own COM reference on success.
            swap_chain1.destroy();
            Ok((swap_chain3, non_srgb_format))
        }
    }
1022
wrap_swapchain( &self, inner: native::WeakPtr<dxgi1_4::IDXGISwapChain3>, config: &w::SwapchainConfig, ) -> Swapchain1023 pub(crate) fn wrap_swapchain(
1024 &self,
1025 inner: native::WeakPtr<dxgi1_4::IDXGISwapChain3>,
1026 config: &w::SwapchainConfig,
1027 ) -> Swapchain {
1028 let rtv_desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC {
1029 Format: conv::map_format(config.format).unwrap(),
1030 ViewDimension: d3d12::D3D12_RTV_DIMENSION_TEXTURE2D,
1031 ..unsafe { mem::zeroed() }
1032 };
1033 let rtv_heap = Device::create_descriptor_heap_impl(
1034 self.raw,
1035 native::DescriptorHeapType::Rtv,
1036 false,
1037 config.image_count as _,
1038 );
1039
1040 let mut resources = vec![native::Resource::null(); config.image_count as usize];
1041 for (i, res) in resources.iter_mut().enumerate() {
1042 let rtv_handle = rtv_heap.at(i as _, 0).cpu;
1043 unsafe {
1044 inner.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void());
1045 self.raw
1046 .CreateRenderTargetView(res.as_mut_ptr(), &rtv_desc, rtv_handle);
1047 }
1048 }
1049
1050 Swapchain {
1051 inner,
1052 next_frame: 0,
1053 frame_queue: VecDeque::new(),
1054 rtv_heap,
1055 resources,
1056 }
1057 }
1058 }
1059
1060 impl d::Device<B> for Device {
    /// Allocates a `size`-byte heap of device memory of the given type.
    ///
    /// Exposed memory types are laid out as `group * NUM_HEAP_PROPERTIES + base`,
    /// where the group (see `MemoryGroup`) selects which resource kinds the heap
    /// may hold, and the base index selects the heap/page properties.
    /// For host-mappable types, a buffer resource spanning the whole heap is
    /// created up-front so the entire allocation can be mapped at once.
    unsafe fn allocate_memory(
        &self,
        mem_type: hal::MemoryTypeId,
        size: u64,
    ) -> Result<r::Memory, d::AllocationError> {
        let mem_type = mem_type.0;
        let mem_base_id = mem_type % NUM_HEAP_PROPERTIES;
        let heap_property = &self.heap_properties[mem_base_id];

        let properties = d3d12::D3D12_HEAP_PROPERTIES {
            Type: d3d12::D3D12_HEAP_TYPE_CUSTOM,
            CPUPageProperty: heap_property.page_property,
            MemoryPoolPreference: heap_property.memory_pool,
            CreationNodeMask: 0,
            VisibleNodeMask: 0,
        };

        // Exposed memory types are grouped according to their capabilities.
        // See `MemoryGroup` for more details.
        let mem_group = mem_type / NUM_HEAP_PROPERTIES;

        let desc = d3d12::D3D12_HEAP_DESC {
            SizeInBytes: size,
            Properties: properties,
            Alignment: d3d12::D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT as _, // TODO: not always..?
            // Heap flags mirror the `MemoryGroup` ordering:
            // Universal, BufferOnly, ImageOnly, TargetOnly.
            Flags: match mem_group {
                0 => d3d12::D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES,
                1 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS,
                2 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES,
                3 => d3d12::D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES,
                _ => unreachable!(),
            },
        };

        let mut heap = native::Heap::null();
        let hr = self
            .raw
            .clone()
            .CreateHeap(&desc, &d3d12::ID3D12Heap::uuidof(), heap.mut_void());
        // Out-of-memory is the only failure reported gracefully;
        // any other HRESULT is treated as a bug.
        if hr == winerror::E_OUTOFMEMORY {
            return Err(d::OutOfMemory::Device.into());
        }
        assert_eq!(winerror::S_OK, hr);

        // The first memory heap of each group corresponds to the default heap,
        // which can never be mapped.
        // Devices supporting heap tier 1 can only create buffers on mem group 1 (ALLOW_ONLY_BUFFERS).
        // Devices supporting heap tier 2 always expose only mem group 0 and don't have any further restrictions.
        let is_mapable = mem_base_id != 0
            && (mem_group == MemoryGroup::Universal as _
                || mem_group == MemoryGroup::BufferOnly as _);

        // Create a buffer resource covering the whole memory slice to be able to map the whole memory.
        let resource = if is_mapable {
            let mut resource = native::Resource::null();
            let desc = d3d12::D3D12_RESOURCE_DESC {
                Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
                Alignment: 0,
                Width: size,
                Height: 1,
                DepthOrArraySize: 1,
                MipLevels: 1,
                Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
                SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
                    Count: 1,
                    Quality: 0,
                },
                Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
                Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
            };

            assert_eq!(
                winerror::S_OK,
                self.raw.clone().CreatePlacedResource(
                    heap.as_mut_ptr(),
                    0,
                    &desc,
                    d3d12::D3D12_RESOURCE_STATE_COMMON,
                    ptr::null(),
                    &d3d12::ID3D12Resource::uuidof(),
                    resource.mut_void(),
                )
            );

            Some(resource)
        } else {
            None
        };

        Ok(r::Memory {
            heap,
            type_id: mem_type,
            size,
            resource,
        })
    }
1157
create_command_pool( &self, family: QueueFamilyId, create_flags: CommandPoolCreateFlags, ) -> Result<CommandPool, d::OutOfMemory>1158 unsafe fn create_command_pool(
1159 &self,
1160 family: QueueFamilyId,
1161 create_flags: CommandPoolCreateFlags,
1162 ) -> Result<CommandPool, d::OutOfMemory> {
1163 let list_type = QUEUE_FAMILIES[family.0].native_type();
1164
1165 let allocator = if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) {
1166 // Allocators are created per individual ID3D12GraphicsCommandList
1167 CommandPoolAllocator::Individual(Vec::new())
1168 } else {
1169 let (command_allocator, hr) = self.raw.create_command_allocator(list_type);
1170
1171 // TODO: error handling
1172 if !winerror::SUCCEEDED(hr) {
1173 error!("error on command allocator creation: {:x}", hr);
1174 }
1175
1176 CommandPoolAllocator::Shared(command_allocator)
1177 };
1178
1179 Ok(CommandPool {
1180 allocator,
1181 device: self.raw,
1182 list_type,
1183 shared: self.shared.clone(),
1184 create_flags,
1185 })
1186 }
1187
    /// Destroys the command pool together with all allocators it owns.
    unsafe fn destroy_command_pool(&self, pool: CommandPool) {
        pool.destroy();
    }
1191
    /// Builds a `RenderPass` by ordering the subpasses topologically (by their
    /// declared dependencies) and translating attachment layout changes into
    /// explicit D3D12 resource barriers.
    ///
    /// Each attachment's desired state per subpass is computed first; state
    /// transitions are then emitted as pre-barriers of the subpass that needs
    /// them — split across subpasses when the previous use is known — and the
    /// final transition into the render-pass end layout goes to `post_barriers`.
    unsafe fn create_render_pass<'a, IA, IS, ID>(
        &self,
        attachments: IA,
        subpasses: IS,
        dependencies: ID,
    ) -> Result<r::RenderPass, d::OutOfMemory>
    where
        IA: IntoIterator,
        IA::Item: Borrow<pass::Attachment>,
        IS: IntoIterator,
        IS::Item: Borrow<pass::SubpassDesc<'a>>,
        ID: IntoIterator,
        ID::Item: Borrow<pass::SubpassDependency>,
    {
        /// Desired resource state of an attachment within a single subpass.
        #[derive(Copy, Clone, Debug, PartialEq)]
        enum SubState {
            New(d3d12::D3D12_RESOURCE_STATES),
            // Color attachment which will be resolved at the end of the subpass
            Resolve(d3d12::D3D12_RESOURCE_STATES),
            Preserve,
            Undefined,
        }
        /// Temporary information about every sub-pass
        struct SubInfo<'a> {
            desc: pass::SubpassDesc<'a>,
            /// States before the render-pass (in self.start)
            /// and after the render-pass (in self.end).
            external_dependencies: Range<image::Access>,
            /// Counts the number of dependencies that need to be resolved
            /// before starting this subpass.
            unresolved_dependencies: u16,
        }
        /// Per-attachment tracking of states and where the last barrier can start.
        struct AttachmentInfo {
            sub_states: Vec<SubState>,
            last_state: d3d12::D3D12_RESOURCE_STATES,
            barrier_start_index: usize,
        }

        let attachments = attachments
            .into_iter()
            .map(|attachment| attachment.borrow().clone())
            .collect::<SmallVec<[_; 5]>>();
        let mut sub_infos = subpasses
            .into_iter()
            .map(|desc| {
                SubInfo {
                    desc: desc.borrow().clone(),
                    external_dependencies: image::Access::empty() .. image::Access::empty(),
                    unresolved_dependencies: 0,
                }
            })
            .collect::<SmallVec<[_; 1]>>();
        let dependencies = dependencies.into_iter().collect::<SmallVec<[_; 2]>>();

        let mut att_infos = (0 .. attachments.len())
            .map(|_| AttachmentInfo {
                sub_states: vec![SubState::Undefined; sub_infos.len()],
                last_state: d3d12::D3D12_RESOURCE_STATE_COMMON, // is to be overwritten
                barrier_start_index: 0,
            })
            .collect::<SmallVec<[_; 5]>>();

        // Accumulate external access flags and count intra-pass dependencies
        // (the counters drive the topological ordering below).
        for dep in &dependencies {
            use hal::pass::SubpassRef as Sr;
            let dep = dep.borrow();
            match dep.passes {
                Range { start: Sr::External, end: Sr::External } => {
                    error!("Unexpected external-external dependency!");
                }
                Range { start: Sr::External, end: Sr::Pass(sid) } => {
                    sub_infos[sid].external_dependencies.start |= dep.accesses.start;
                }
                Range { start: Sr::Pass(sid), end: Sr::External } => {
                    sub_infos[sid].external_dependencies.end |= dep.accesses.end;
                }
                Range { start: Sr::Pass(from_sid), end: Sr::Pass(sid) } => {
                    //Note: self-dependencies are ignored
                    if from_sid != sid {
                        sub_infos[sid].unresolved_dependencies += 1;
                    }
                }
            }
        }

        // Fill out subpass known layouts
        for (sid, sub_info) in sub_infos.iter().enumerate() {
            let sub = &sub_info.desc;
            for (i, &(id, _layout)) in sub.colors.iter().enumerate() {
                let target_state = d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET;
                let state = match sub.resolves.get(i) {
                    Some(_) => SubState::Resolve(target_state),
                    None => SubState::New(target_state),
                };
                let old = mem::replace(&mut att_infos[id].sub_states[sid], state);
                debug_assert_eq!(SubState::Undefined, old);
            }
            for &(id, layout) in sub.depth_stencil {
                let state = SubState::New(match layout {
                    image::Layout::DepthStencilAttachmentOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE,
                    image::Layout::DepthStencilReadOnlyOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_READ,
                    image::Layout::General => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE,
                    _ => {
                        error!("Unexpected depth/stencil layout: {:?}", layout);
                        d3d12::D3D12_RESOURCE_STATE_COMMON
                    }
                });
                let old = mem::replace(&mut att_infos[id].sub_states[sid], state);
                debug_assert_eq!(SubState::Undefined, old);
            }
            for &(id, _layout) in sub.inputs {
                let state = SubState::New(d3d12::D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
                let old = mem::replace(&mut att_infos[id].sub_states[sid], state);
                debug_assert_eq!(SubState::Undefined, old);
            }
            for &(id, _layout) in sub.resolves {
                let state = SubState::New(d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST);
                let old = mem::replace(&mut att_infos[id].sub_states[sid], state);
                debug_assert_eq!(SubState::Undefined, old);
            }
            for &id in sub.preserves {
                let old = mem::replace(&mut att_infos[id].sub_states[sid], SubState::Preserve);
                debug_assert_eq!(SubState::Undefined, old);
            }
        }

        let mut rp = r::RenderPass {
            attachments: attachments.iter().cloned().collect(),
            subpasses: Vec::new(),
            post_barriers: Vec::new(),
        };

        // Topological order: repeatedly pick a subpass with no remaining
        // unresolved dependencies and emit its barriers.
        while let Some(sid) = sub_infos.iter().position(|si| si.unresolved_dependencies == 0) {
            // Resolve dependencies of other subpasses that wait on this one.
            for dep in &dependencies {
                let dep = dep.borrow();
                if dep.passes.start != dep.passes.end
                    && dep.passes.start == pass::SubpassRef::Pass(sid)
                {
                    if let pass::SubpassRef::Pass(other) = dep.passes.end {
                        sub_infos[other].unresolved_dependencies -= 1;
                    }
                }
            }

            let si = &mut sub_infos[sid];
            si.unresolved_dependencies = !0; // mark as done

            // Subpass barriers
            let mut pre_barriers = Vec::new();
            let mut post_barriers = Vec::new();
            for (att_id, (ai, att)) in att_infos.iter_mut().zip(attachments.iter()).enumerate() {
                // Attachment wasn't used before, figure out the initial state
                if ai.barrier_start_index == 0 {
                    //Note: the external dependencies are provided for all attachments that are
                    // first used in this sub-pass, so they may contain more states than we expect
                    // for this particular attachment.
                    ai.last_state = conv::map_image_resource_state(
                        si.external_dependencies.start,
                        att.layouts.start,
                    );
                }
                // Barrier from previous subpass to current or following subpasses.
                match ai.sub_states[sid] {
                    SubState::Preserve => {
                        ai.barrier_start_index = rp.subpasses.len() + 1;
                    }
                    SubState::New(state) if state != ai.last_state => {
                        // Split the barrier when a previous subpass is known:
                        // the begin half goes into that subpass, the end half here.
                        let barrier = r::BarrierDesc::new(att_id, ai.last_state .. state);
                        match rp.subpasses.get_mut(ai.barrier_start_index) {
                            Some(past_subpass) => {
                                let split = barrier.split();
                                past_subpass.pre_barriers.push(split.start);
                                pre_barriers.push(split.end);
                            }
                            None => pre_barriers.push(barrier),
                        }
                        ai.last_state = state;
                        ai.barrier_start_index = rp.subpasses.len() + 1;
                    }
                    SubState::Resolve(state) => {
                        // 1. Standard pre barrier to update state from previous pass into desired substate.
                        if state != ai.last_state {
                            let barrier = r::BarrierDesc::new(att_id, ai.last_state .. state);
                            match rp.subpasses.get_mut(ai.barrier_start_index) {
                                Some(past_subpass) => {
                                    let split = barrier.split();
                                    past_subpass.pre_barriers.push(split.start);
                                    pre_barriers.push(split.end);
                                }
                                None => pre_barriers.push(barrier),
                            }
                        }

                        // 2. Post Barrier at the end of the subpass into RESOLVE_SOURCE.
                        let resolve_state = d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE;
                        let barrier = r::BarrierDesc::new(att_id, state .. resolve_state);
                        post_barriers.push(barrier);

                        ai.last_state = resolve_state;
                        ai.barrier_start_index = rp.subpasses.len() + 1;
                    }
                    SubState::Undefined |
                    SubState::New(_) => {}
                };
            }

            rp.subpasses.push(r::SubpassDesc {
                color_attachments: si.desc.colors.iter().cloned().collect(),
                depth_stencil_attachment: si.desc.depth_stencil.cloned(),
                input_attachments: si.desc.inputs.iter().cloned().collect(),
                resolve_attachments: si.desc.resolves.iter().cloned().collect(),
                pre_barriers,
                post_barriers,
            });
        }
        // if this fails, our graph has cycles
        assert_eq!(rp.subpasses.len(), sub_infos.len());
        assert!(sub_infos.iter().all(|si| si.unresolved_dependencies == !0));

        // take care of the post-pass transitions at the end of the renderpass.
        for (att_id, (ai, att)) in att_infos.iter().zip(attachments.iter()).enumerate() {
            let state_dst = if ai.barrier_start_index == 0 {
                // attachment wasn't used in any sub-pass?
                continue
            } else {
                let si = &sub_infos[ai.barrier_start_index - 1];
                conv::map_image_resource_state(si.external_dependencies.end, att.layouts.end)
            };
            if state_dst == ai.last_state {
                continue;
            }
            let barrier = r::BarrierDesc::new(att_id, ai.last_state .. state_dst);
            match rp.subpasses.get_mut(ai.barrier_start_index) {
                Some(past_subpass) => {
                    let split = barrier.split();
                    past_subpass.pre_barriers.push(split.start);
                    rp.post_barriers.push(split.end);
                }
                None => rp.post_barriers.push(barrier),
            }
        }

        Ok(rp)
    }
1435
    /// Creates a pipeline layout, i.e. a D3D12 root signature.
    ///
    /// Push constants become root constants, each descriptor set layout
    /// occupies up to two descriptor tables (SRV/CBV/UAV and Sampler), and
    /// dynamic uniform buffers become root CBV descriptors.
    ///
    /// NOTE: `parameters` and `ranges` are pre-sized and must never
    /// reallocate, because root-parameter entries hold raw pointers into
    /// `ranges` (see the capacity computations and the final debug_assert).
    unsafe fn create_pipeline_layout<IS, IR>(
        &self,
        sets: IS,
        push_constant_ranges: IR,
    ) -> Result<r::PipelineLayout, d::OutOfMemory>
    where
        IS: IntoIterator,
        IS::Item: Borrow<r::DescriptorSetLayout>,
        IR: IntoIterator,
        IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>,
    {
        // Pipeline layouts are implemented as RootSignature for D3D12.
        //
        // Push Constants are implemented as root constants.
        //
        // Each descriptor set layout will be one table entry of the root signature.
        // We have the additional restriction that SRV/CBV/UAV and samplers need to be
        // separated, so each set layout will actually occupy up to 2 entries!
        //
        // Dynamic uniform buffers are implemented as root descriptors.
        // This allows to handle the dynamic offsets properly, which would not be feasible
        // with a combination of root constant and descriptor table.
        //
        // Root signature layout:
        // Root Constants: Register: Offest/4, Space: 0
        // ...
        // DescriptorTable0: Space: 1 (+1) (SrvCbvUav)
        // Root Descriptors
        // DescriptorTable0: Space: 2 (+1) (Sampler)
        // DescriptorTable1: Space: 3 (+1) (SrvCbvUav)
        // ...

        let sets = sets.into_iter().collect::<Vec<_>>();

        // `root_offset` tracks the running size of the root signature in
        // DWORD-sized slots (constants: 1 per u32, root CBV: 2, table: 1).
        let mut root_offset = 0;
        let root_constants = root_constants::split(push_constant_ranges)
            .iter()
            .map(|constant| {
                assert!(constant.range.start <= constant.range.end);
                root_offset += (constant.range.end - constant.range.start) as usize;

                RootConstant {
                    stages: constant.stages,
                    range: constant.range.start .. constant.range.end,
                }
            })
            .collect::<Vec<_>>();

        info!(
            "Creating a pipeline layout with {} sets and {} root constants",
            sets.len(),
            root_constants.len()
        );

        // Number of elements in the root signature.
        // Guarantees that no re-allocation is done, and our pointers are valid
        let mut parameters = Vec::with_capacity(root_constants.len() + sets.len() * 2);

        // Convert root signature descriptions into root signature parameters.
        for root_constant in root_constants.iter() {
            debug!(
                "\tRoot constant set={} range {:?}",
                ROOT_CONSTANT_SPACE, root_constant.range
            );
            parameters.push(native::RootParameter::constants(
                conv::map_shader_visibility(root_constant.stages),
                native::Binding {
                    register: root_constant.range.start as _,
                    space: ROOT_CONSTANT_SPACE,
                },
                (root_constant.range.end - root_constant.range.start) as _,
            ));
        }

        // Offest of `spaceN` for descriptor tables. Root constants will be in
        // `space0`.
        // This has to match `patch_spirv_resources` logic.
        let root_space_offset = if !root_constants.is_empty() { 1 } else { 0 };

        // Collect the whole number of bindings we will create upfront.
        // It allows us to preallocate enough storage to avoid reallocation,
        // which could cause invalid pointers.
        let total = sets
            .iter()
            .map(|desc_set| {
                let mut sum = 0;
                for binding in desc_set.borrow().bindings.iter() {
                    let content = r::DescriptorContent::from(binding.ty);
                    if !content.is_dynamic() {
                        // One descriptor range per set content flag (CBV/SRV/UAV/Sampler).
                        sum += content.bits().count_ones() as usize;
                    }
                }
                sum
            })
            .sum();
        let mut ranges = Vec::with_capacity(total);

        let elements = sets.iter().enumerate().map(|(i, set)| {
            let set = set.borrow();
            let space = (root_space_offset + i) as u32;
            let mut table_type = r::SetTableTypes::empty();
            let root_table_offset = root_offset;

            //TODO: split between sampler and non-sampler tables
            let visibility = conv::map_shader_visibility(
                set.bindings
                    .iter()
                    .fold(pso::ShaderStageFlags::empty(), |u, bind| {
                        u | bind.stage_flags
                    }),
            );

            for bind in set.bindings.iter() {
                debug!("\tRange {:?} at space={}", bind, space);
            }

            let describe = |bind: &pso::DescriptorSetLayoutBinding, ty| {
                native::DescriptorRange::new(
                    ty,
                    bind.count as _,
                    native::Binding {
                        register: bind.binding as _,
                        space,
                    },
                    d3d12::D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND,
                )
            };

            let mut descriptors = Vec::new();
            let mut range_base = ranges.len();
            for bind in set.bindings.iter() {
                let content = r::DescriptorContent::from(bind.ty);

                if content.is_dynamic() {
                    // Root Descriptor
                    let binding = native::Binding {
                        register: bind.binding as _,
                        space,
                    };

                    if content.contains(r::DescriptorContent::CBV) {
                        descriptors.push(r::RootDescriptor {
                            offset: root_offset,
                        });
                        parameters.push(native::RootParameter::cbv_descriptor(visibility, binding));
                        // A root descriptor occupies 2 DWORDs in the root signature.
                        root_offset += 2;
                    } else {
                        // SRV and UAV not implemented so far
                        unimplemented!()
                    }
                } else {
                    // Descriptor table ranges
                    if content.contains(r::DescriptorContent::CBV) {
                        ranges.push(describe(bind, native::DescriptorRangeType::CBV));
                    }
                    if content.contains(r::DescriptorContent::SRV) {
                        ranges.push(describe(bind, native::DescriptorRangeType::SRV));
                    }
                    if content.contains(r::DescriptorContent::UAV) {
                        ranges.push(describe(bind, native::DescriptorRangeType::UAV));
                    }
                }
            }
            if ranges.len() > range_base {
                parameters.push(native::RootParameter::descriptor_table(
                    visibility,
                    &ranges[range_base ..],
                ));
                table_type |= r::SRV_CBV_UAV;
                root_offset += 1;
            }

            // Samplers always go into a table of their own.
            range_base = ranges.len();
            for bind in set.bindings.iter() {
                let content = r::DescriptorContent::from(bind.ty);
                if content.contains(r::DescriptorContent::SAMPLER) {
                    ranges.push(describe(bind, native::DescriptorRangeType::Sampler));
                }
            }
            if ranges.len() > range_base {
                parameters.push(native::RootParameter::descriptor_table(
                    visibility,
                    &ranges[range_base ..],
                ));
                table_type |= r::SAMPLERS;
                root_offset += 1;
            }

            r::RootElement {
                table: r::RootTable {
                    ty: table_type,
                    offset: root_table_offset as _,
                },
                descriptors,
            }
        }).collect();

        // Ensure that we didn't reallocate!
        debug_assert_eq!(ranges.len(), total);

        // TODO: error handling
        let (signature_raw, error) = match self.library.serialize_root_signature(
            native::RootSignatureVersion::V1_0,
            &parameters,
            &[],
            native::RootSignatureFlags::ALLOW_IA_INPUT_LAYOUT,
        ) {
            Ok((pair, hr)) if winerror::SUCCEEDED(hr) => pair,
            Ok((_, hr)) => panic!("Can't serialize root signature: {:?}", hr),
            Err(e) => panic!("Can't find serialization function: {:?}", e),
        };

        if !error.is_null() {
            error!(
                "Root signature serialization error: {:?}",
                error.as_c_str().to_str().unwrap()
            );
            error.destroy();
        }

        // TODO: error handling
        let (signature, _hr) = self.raw.create_root_signature(signature_raw, 0);
        signature_raw.destroy();

        Ok(r::PipelineLayout {
            raw: signature,
            constants: root_constants,
            elements,
            num_parameter_slots: parameters.len(),
        })
    }
1667
    /// Pipeline caches are not implemented in this backend; creation is a no-op
    /// and any provided initial data is ignored.
    unsafe fn create_pipeline_cache(&self, _data: Option<&[u8]>) -> Result<(), d::OutOfMemory> {
        Ok(())
    }
1671
    /// Pipeline caches are not implemented; always returns empty cache data.
    unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result<Vec<u8>, d::OutOfMemory> {
        //empty
        Ok(Vec::new())
    }
1676
    /// Pipeline caches are not implemented; there is nothing to destroy.
    unsafe fn destroy_pipeline_cache(&self, _: ()) {
        //empty
    }
1680
    /// Pipeline caches are not implemented; merging is a no-op that always
    /// succeeds.
    unsafe fn merge_pipeline_caches<I>(&self, _: &(), _: I) -> Result<(), d::OutOfMemory>
    where
        I: IntoIterator,
        I::Item: Borrow<()>,
    {
        //empty
        Ok(())
    }
1689
create_graphics_pipeline<'a>( &self, desc: &pso::GraphicsPipelineDesc<'a, B>, _cache: Option<&()>, ) -> Result<r::GraphicsPipeline, pso::CreationError>1690 unsafe fn create_graphics_pipeline<'a>(
1691 &self,
1692 desc: &pso::GraphicsPipelineDesc<'a, B>,
1693 _cache: Option<&()>,
1694 ) -> Result<r::GraphicsPipeline, pso::CreationError> {
1695 enum ShaderBc {
1696 Owned(native::Blob),
1697 Borrowed(native::Blob),
1698 None,
1699 }
1700 impl ShaderBc {
1701 pub fn shader(&self) -> native::Shader {
1702 match *self {
1703 ShaderBc::Owned(ref bc) | ShaderBc::Borrowed(ref bc) => {
1704 native::Shader::from_blob(*bc)
1705 }
1706 ShaderBc::None => native::Shader::null(),
1707 }
1708 }
1709 }
1710
1711 let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, B>>| {
1712 let source = match source {
1713 Some(src) => src,
1714 None => return Ok(ShaderBc::None),
1715 };
1716
1717 match Self::extract_entry_point(stage, source, desc.layout) {
1718 Ok((shader, true)) => Ok(ShaderBc::Owned(shader)),
1719 Ok((shader, false)) => Ok(ShaderBc::Borrowed(shader)),
1720 Err(err) => Err(pso::CreationError::Shader(err)),
1721 }
1722 };
1723
1724 let vs = build_shader(pso::Stage::Vertex, Some(&desc.shaders.vertex))?;
1725 let ps = build_shader(pso::Stage::Fragment, desc.shaders.fragment.as_ref())?;
1726 let gs = build_shader(pso::Stage::Geometry, desc.shaders.geometry.as_ref())?;
1727 let ds = build_shader(pso::Stage::Domain, desc.shaders.domain.as_ref())?;
1728 let hs = build_shader(pso::Stage::Hull, desc.shaders.hull.as_ref())?;
1729
1730 // Rebind vertex buffers, see native.rs for more details.
1731 let mut vertex_bindings = [None; MAX_VERTEX_BUFFERS];
1732 let mut vertex_strides = [0; MAX_VERTEX_BUFFERS];
1733
1734 for buffer in &desc.vertex_buffers {
1735 vertex_strides[buffer.binding as usize] = buffer.stride;
1736 }
1737 // Fill in identity mapping where we don't need to adjust anything.
1738 for attrib in &desc.attributes {
1739 let binding = attrib.binding as usize;
1740 let stride = vertex_strides[attrib.binding as usize];
1741 if attrib.element.offset < stride {
1742 vertex_bindings[binding] = Some(r::VertexBinding {
1743 stride: vertex_strides[attrib.binding as usize],
1744 offset: 0,
1745 mapped_binding: binding,
1746 });
1747 }
1748 }
1749
1750 // Define input element descriptions
1751 let input_element_descs = desc
1752 .attributes
1753 .iter()
1754 .filter_map(|attrib| {
1755 let buffer_desc = match desc
1756 .vertex_buffers
1757 .iter()
1758 .find(|buffer_desc| buffer_desc.binding == attrib.binding)
1759 {
1760 Some(buffer_desc) => buffer_desc,
1761 None => {
1762 error!(
1763 "Couldn't find associated vertex buffer description {:?}",
1764 attrib.binding
1765 );
1766 return Some(Err(pso::CreationError::Other));
1767 }
1768 };
1769
1770 let (slot_class, step_rate) = match buffer_desc.rate {
1771 VertexInputRate::Vertex => {
1772 (d3d12::D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0)
1773 }
1774 VertexInputRate::Instance(divisor) => {
1775 (d3d12::D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA, divisor)
1776 }
1777 };
1778 let format = attrib.element.format;
1779
1780 // Check if we need to add a new remapping in-case the offset is
1781 // higher than the vertex stride.
1782 // In this case we rebase the attribute to zero offset.
1783 let binding = attrib.binding as usize;
1784 let stride = vertex_strides[binding];
1785 let offset = attrib.element.offset;
1786 let (input_slot, offset) = if stride <= offset {
1787 // Number of input attributes may not exceed bindings, see limits.
1788 // We will always find at least one free binding.
1789 let mapping = vertex_bindings.iter().position(Option::is_none).unwrap();
1790 vertex_bindings[mapping] = Some(r::VertexBinding {
1791 stride: vertex_strides[binding],
1792 offset: offset,
1793 mapped_binding: binding,
1794 });
1795
1796 (mapping, 0)
1797 } else {
1798 (binding, offset)
1799 };
1800
1801 Some(Ok(d3d12::D3D12_INPUT_ELEMENT_DESC {
1802 SemanticName: "TEXCOORD\0".as_ptr() as *const _, // Semantic name used by SPIRV-Cross
1803 SemanticIndex: attrib.location,
1804 Format: match conv::map_format(format) {
1805 Some(fm) => fm,
1806 None => {
1807 error!("Unable to find DXGI format for {:?}", format);
1808 return Some(Err(pso::CreationError::Other));
1809 }
1810 },
1811 InputSlot: input_slot as _,
1812 AlignedByteOffset: offset,
1813 InputSlotClass: slot_class,
1814 InstanceDataStepRate: step_rate as _,
1815 }))
1816 })
1817 .collect::<Result<Vec<_>, _>>()?;
1818
1819 // TODO: check maximum number of rtvs
1820 // Get associated subpass information
1821 let pass = {
1822 let subpass = &desc.subpass;
1823 match subpass.main_pass.subpasses.get(subpass.index) {
1824 Some(subpass) => subpass,
1825 None => return Err(pso::CreationError::InvalidSubpass(subpass.index)),
1826 }
1827 };
1828
1829 // Get color attachment formats from subpass
1830 let (rtvs, num_rtvs) = {
1831 let mut rtvs = [dxgiformat::DXGI_FORMAT_UNKNOWN; 8];
1832 let mut num_rtvs = 0;
1833 for (rtv, target) in rtvs.iter_mut().zip(pass.color_attachments.iter()) {
1834 let format = desc.subpass.main_pass.attachments[target.0].format;
1835 *rtv = format
1836 .and_then(conv::map_format)
1837 .unwrap_or(dxgiformat::DXGI_FORMAT_UNKNOWN);
1838 num_rtvs += 1;
1839 }
1840 (rtvs, num_rtvs)
1841 };
1842
1843 let sample_desc = dxgitype::DXGI_SAMPLE_DESC {
1844 Count: match desc.multisampling {
1845 Some(ref ms) => ms.rasterization_samples as _,
1846 None => 1,
1847 },
1848 Quality: 0,
1849 };
1850
1851 // Setup pipeline description
1852 let pso_desc = d3d12::D3D12_GRAPHICS_PIPELINE_STATE_DESC {
1853 pRootSignature: desc.layout.raw.as_mut_ptr(),
1854 VS: *vs.shader(),
1855 PS: *ps.shader(),
1856 GS: *gs.shader(),
1857 DS: *ds.shader(),
1858 HS: *hs.shader(),
1859 StreamOutput: d3d12::D3D12_STREAM_OUTPUT_DESC {
1860 pSODeclaration: ptr::null(),
1861 NumEntries: 0,
1862 pBufferStrides: ptr::null(),
1863 NumStrides: 0,
1864 RasterizedStream: 0,
1865 },
1866 BlendState: d3d12::D3D12_BLEND_DESC {
1867 AlphaToCoverageEnable: desc.multisampling.as_ref().map_or(FALSE, |ms| {
1868 if ms.alpha_coverage {
1869 TRUE
1870 } else {
1871 FALSE
1872 }
1873 }),
1874 IndependentBlendEnable: TRUE,
1875 RenderTarget: conv::map_render_targets(&desc.blender.targets),
1876 },
1877 SampleMask: UINT::max_value(),
1878 RasterizerState: conv::map_rasterizer(&desc.rasterizer),
1879 DepthStencilState: conv::map_depth_stencil(&desc.depth_stencil),
1880 InputLayout: d3d12::D3D12_INPUT_LAYOUT_DESC {
1881 pInputElementDescs: if input_element_descs.is_empty() {
1882 ptr::null()
1883 } else {
1884 input_element_descs.as_ptr()
1885 },
1886 NumElements: input_element_descs.len() as u32,
1887 },
1888 IBStripCutValue: d3d12::D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED, // TODO
1889 PrimitiveTopologyType: conv::map_topology_type(desc.input_assembler.primitive),
1890 NumRenderTargets: num_rtvs,
1891 RTVFormats: rtvs,
1892 DSVFormat: pass
1893 .depth_stencil_attachment
1894 .and_then(|att_ref| {
1895 desc.subpass.main_pass.attachments[att_ref.0]
1896 .format
1897 .and_then(|f| conv::map_format_dsv(f.base_format().0))
1898 })
1899 .unwrap_or(dxgiformat::DXGI_FORMAT_UNKNOWN),
1900 SampleDesc: sample_desc,
1901 NodeMask: 0,
1902 CachedPSO: d3d12::D3D12_CACHED_PIPELINE_STATE {
1903 pCachedBlob: ptr::null(),
1904 CachedBlobSizeInBytes: 0,
1905 },
1906 Flags: d3d12::D3D12_PIPELINE_STATE_FLAG_NONE,
1907 };
1908
1909 let topology = conv::map_topology(&desc.input_assembler);
1910
1911 // Create PSO
1912 let mut pipeline = native::PipelineState::null();
1913 let hr = if desc.depth_stencil.depth_bounds {
1914 // The DepthBoundsTestEnable option isn't available in the original D3D12_GRAPHICS_PIPELINE_STATE_DESC struct.
1915 // Instead, we must use the newer subobject stream method.
1916 let (device2, hr) = self.raw.cast::<d3d12::ID3D12Device2>();
1917 if winerror::SUCCEEDED(hr) {
1918 let mut pss_stream = GraphicsPipelineStateSubobjectStream::new(&pso_desc, true);
1919 let pss_desc = d3d12::D3D12_PIPELINE_STATE_STREAM_DESC {
1920 SizeInBytes: mem::size_of_val(&pss_stream),
1921 pPipelineStateSubobjectStream: &mut pss_stream as *mut _ as _,
1922 };
1923 device2.CreatePipelineState(
1924 &pss_desc,
1925 &d3d12::ID3D12PipelineState::uuidof(),
1926 pipeline.mut_void(),
1927 )
1928 } else {
1929 hr
1930 }
1931 } else {
1932 self.raw.clone().CreateGraphicsPipelineState(
1933 &pso_desc,
1934 &d3d12::ID3D12PipelineState::uuidof(),
1935 pipeline.mut_void(),
1936 )
1937 };
1938
1939 let destroy_shader = |shader: ShaderBc| {
1940 if let ShaderBc::Owned(bc) = shader {
1941 bc.destroy();
1942 }
1943 };
1944
1945 destroy_shader(vs);
1946 destroy_shader(ps);
1947 destroy_shader(gs);
1948 destroy_shader(hs);
1949 destroy_shader(ds);
1950
1951 if winerror::SUCCEEDED(hr) {
1952 let mut baked_states = desc.baked_states.clone();
1953 if !desc.depth_stencil.depth_bounds {
1954 baked_states.depth_bounds = None;
1955 }
1956
1957 Ok(r::GraphicsPipeline {
1958 raw: pipeline,
1959 signature: desc.layout.raw,
1960 num_parameter_slots: desc.layout.num_parameter_slots,
1961 topology,
1962 constants: desc.layout.constants.clone(),
1963 vertex_bindings,
1964 baked_states,
1965 })
1966 } else {
1967 Err(pso::CreationError::Other)
1968 }
1969 }
1970
create_compute_pipeline<'a>( &self, desc: &pso::ComputePipelineDesc<'a, B>, _cache: Option<&()>, ) -> Result<r::ComputePipeline, pso::CreationError>1971 unsafe fn create_compute_pipeline<'a>(
1972 &self,
1973 desc: &pso::ComputePipelineDesc<'a, B>,
1974 _cache: Option<&()>,
1975 ) -> Result<r::ComputePipeline, pso::CreationError> {
1976 let (cs, cs_destroy) =
1977 Self::extract_entry_point(pso::Stage::Compute, &desc.shader, desc.layout)
1978 .map_err(|err| pso::CreationError::Shader(err))?;
1979
1980 let (pipeline, hr) = self.raw.create_compute_pipeline_state(
1981 desc.layout.raw,
1982 native::Shader::from_blob(cs),
1983 0,
1984 native::CachedPSO::null(),
1985 native::PipelineStateFlags::empty(),
1986 );
1987
1988 if cs_destroy {
1989 cs.destroy();
1990 }
1991
1992 if winerror::SUCCEEDED(hr) {
1993 Ok(r::ComputePipeline {
1994 raw: pipeline,
1995 signature: desc.layout.raw,
1996 num_parameter_slots: desc.layout.num_parameter_slots,
1997 constants: desc.layout.constants.clone(),
1998 })
1999 } else {
2000 Err(pso::CreationError::Other)
2001 }
2002 }
2003
create_framebuffer<I>( &self, _renderpass: &r::RenderPass, attachments: I, extent: image::Extent, ) -> Result<r::Framebuffer, d::OutOfMemory> where I: IntoIterator, I::Item: Borrow<r::ImageView>,2004 unsafe fn create_framebuffer<I>(
2005 &self,
2006 _renderpass: &r::RenderPass,
2007 attachments: I,
2008 extent: image::Extent,
2009 ) -> Result<r::Framebuffer, d::OutOfMemory>
2010 where
2011 I: IntoIterator,
2012 I::Item: Borrow<r::ImageView>,
2013 {
2014 Ok(r::Framebuffer {
2015 attachments: attachments.into_iter().map(|att| *att.borrow()).collect(),
2016 layers: extent.depth as _,
2017 })
2018 }
2019
create_shader_module( &self, raw_data: &[u32], ) -> Result<r::ShaderModule, d::ShaderError>2020 unsafe fn create_shader_module(
2021 &self,
2022 raw_data: &[u32],
2023 ) -> Result<r::ShaderModule, d::ShaderError> {
2024 Ok(r::ShaderModule::Spirv(raw_data.into()))
2025 }
2026
create_buffer( &self, mut size: u64, usage: buffer::Usage, ) -> Result<r::Buffer, buffer::CreationError>2027 unsafe fn create_buffer(
2028 &self,
2029 mut size: u64,
2030 usage: buffer::Usage,
2031 ) -> Result<r::Buffer, buffer::CreationError> {
2032 if usage.contains(buffer::Usage::UNIFORM) {
2033 // Constant buffer view sizes need to be aligned.
2034 // Coupled with the offset alignment we can enforce an aligned CBV size
2035 // on descriptor updates.
2036 size = (size + 255) & !255;
2037 }
2038 if usage.contains(buffer::Usage::TRANSFER_DST) {
2039 // minimum of 1 word for the clear UAV
2040 size = size.max(4);
2041 }
2042
2043 let type_mask_shift = if self.private_caps.heterogeneous_resource_heaps {
2044 MEM_TYPE_UNIVERSAL_SHIFT
2045 } else {
2046 MEM_TYPE_BUFFER_SHIFT
2047 };
2048
2049 let requirements = memory::Requirements {
2050 size,
2051 alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
2052 type_mask: MEM_TYPE_MASK << type_mask_shift,
2053 };
2054
2055 Ok(r::Buffer::Unbound(r::BufferUnbound {
2056 requirements,
2057 usage,
2058 }))
2059 }
2060
get_buffer_requirements(&self, buffer: &r::Buffer) -> Requirements2061 unsafe fn get_buffer_requirements(&self, buffer: &r::Buffer) -> Requirements {
2062 match buffer {
2063 r::Buffer::Unbound(b) => b.requirements,
2064 r::Buffer::Bound(b) => b.requirements,
2065 }
2066 }
2067
    /// Binds a region of `memory` to an unbound buffer by creating a placed
    /// resource inside the memory's heap.
    ///
    /// Returns `WrongMemory` if the memory type is outside the buffer's
    /// supported mask, `OutOfBounds` if the region exceeds the allocation.
    /// On success, `*buffer` transitions from `Unbound` to `Bound`.
    unsafe fn bind_buffer_memory(
        &self,
        memory: &r::Memory,
        offset: u64,
        buffer: &mut r::Buffer,
    ) -> Result<(), d::BindError> {
        // Panics if the buffer was already bound.
        let buffer_unbound = *buffer.expect_unbound();
        if buffer_unbound.requirements.type_mask & (1 << memory.type_id) == 0 {
            error!(
                "Bind memory failure: supported mask 0x{:x}, given id {}",
                buffer_unbound.requirements.type_mask, memory.type_id
            );
            return Err(d::BindError::WrongMemory);
        }
        if offset + buffer_unbound.requirements.size > memory.size {
            return Err(d::BindError::OutOfBounds);
        }

        let mut resource = native::Resource::null();
        // Buffers are described as 1D, row-major, format-less resources.
        let desc = d3d12::D3D12_RESOURCE_DESC {
            Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
            Alignment: 0,
            Width: buffer_unbound.requirements.size,
            Height: 1,
            DepthOrArraySize: 1,
            MipLevels: 1,
            Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
            SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
                Count: 1,
                Quality: 0,
            },
            Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
            Flags: conv::map_buffer_flags(buffer_unbound.usage),
        };

        // Creation failure is treated as a bug/invariant violation here.
        assert_eq!(
            winerror::S_OK,
            self.raw.clone().CreatePlacedResource(
                memory.heap.as_mut_ptr(),
                offset,
                &desc,
                d3d12::D3D12_RESOURCE_STATE_COMMON,
                ptr::null(),
                &d3d12::ID3D12Resource::uuidof(),
                resource.mut_void(),
            )
        );

        // Transfer destinations get a raw (R32_TYPELESS) UAV over the whole
        // buffer so it can later be cleared through an unordered access view.
        // The size was rounded up to at least 4 bytes at creation time, so
        // `size / 4` is never zero here.
        let clear_uav = if buffer_unbound.usage.contains(buffer::Usage::TRANSFER_DST) {
            let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
            let mut view_desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC {
                Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS,
                ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER,
                u: mem::zeroed(),
            };

            *view_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV {
                FirstElement: 0,
                NumElements: (buffer_unbound.requirements.size / 4) as _,
                StructureByteStride: 0,
                CounterOffsetInBytes: 0,
                Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW,
            };

            self.raw.CreateUnorderedAccessView(
                resource.as_mut_ptr(),
                ptr::null_mut(),
                &view_desc,
                handle,
            );
            Some(handle)
        } else {
            None
        };

        *buffer = r::Buffer::Bound(r::BufferBound {
            resource,
            requirements: buffer_unbound.requirements,
            clear_uav,
        });

        Ok(())
    }
2151
create_buffer_view<R: RangeArg<u64>>( &self, buffer: &r::Buffer, format: Option<format::Format>, range: R, ) -> Result<r::BufferView, buffer::ViewCreationError>2152 unsafe fn create_buffer_view<R: RangeArg<u64>>(
2153 &self,
2154 buffer: &r::Buffer,
2155 format: Option<format::Format>,
2156 range: R,
2157 ) -> Result<r::BufferView, buffer::ViewCreationError> {
2158 let buffer = buffer.expect_bound();
2159 let buffer_features = {
2160 let idx = format.map(|fmt| fmt as usize).unwrap_or(0);
2161 self.format_properties.get(idx).properties.buffer_features
2162 };
2163 let (format, format_desc) = match format.and_then(conv::map_format) {
2164 Some(fmt) => (fmt, format.unwrap().surface_desc()),
2165 None => return Err(buffer::ViewCreationError::UnsupportedFormat { format }),
2166 };
2167
2168 let start = *range.start().unwrap_or(&0);
2169 let end = *range.end().unwrap_or(&(buffer.requirements.size as _));
2170
2171 let bytes_per_texel = (format_desc.bits / 8) as u64;
2172 // Check if it adheres to the texel buffer offset limit
2173 assert_eq!(start % bytes_per_texel, 0);
2174 let first_element = start / bytes_per_texel;
2175 let num_elements = (end - start) / bytes_per_texel; // rounds down to next smaller size
2176
2177 let handle_srv = if buffer_features.contains(format::BufferFeature::UNIFORM_TEXEL) {
2178 let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC {
2179 Format: format,
2180 ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER,
2181 Shader4ComponentMapping: IDENTITY_MAPPING,
2182 u: mem::zeroed(),
2183 };
2184
2185 *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV {
2186 FirstElement: first_element,
2187 NumElements: num_elements as _,
2188 StructureByteStride: bytes_per_texel as _,
2189 Flags: d3d12::D3D12_BUFFER_SRV_FLAG_NONE,
2190 };
2191
2192 let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
2193 self.raw
2194 .clone()
2195 .CreateShaderResourceView(buffer.resource.as_mut_ptr(), &desc, handle);
2196 handle
2197 } else {
2198 d3d12::D3D12_CPU_DESCRIPTOR_HANDLE { ptr: 0 }
2199 };
2200
2201 let handle_uav = if buffer_features.intersects(
2202 format::BufferFeature::STORAGE_TEXEL | format::BufferFeature::STORAGE_TEXEL_ATOMIC,
2203 ) {
2204 let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC {
2205 Format: format,
2206 ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER,
2207 u: mem::zeroed(),
2208 };
2209
2210 *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV {
2211 FirstElement: first_element,
2212 NumElements: num_elements as _,
2213 StructureByteStride: bytes_per_texel as _,
2214 Flags: d3d12::D3D12_BUFFER_UAV_FLAG_NONE,
2215 CounterOffsetInBytes: 0,
2216 };
2217
2218 let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
2219 self.raw.clone().CreateUnorderedAccessView(
2220 buffer.resource.as_mut_ptr(),
2221 ptr::null_mut(),
2222 &desc,
2223 handle,
2224 );
2225 handle
2226 } else {
2227 d3d12::D3D12_CPU_DESCRIPTOR_HANDLE { ptr: 0 }
2228 };
2229
2230 return Ok(r::BufferView {
2231 handle_srv,
2232 handle_uav,
2233 });
2234 }
2235
    /// Creates an unbound image and computes its memory requirements.
    ///
    /// The D3D12 resource description is built here, but the resource itself
    /// is only created later in `bind_image_memory`.
    ///
    /// Returns `Samples` if the requested sample count is unsupported for the
    /// format, or `Format` if the surface type has no DXGI equivalent.
    /// Panics if `mip_levels` exceeds what the image kind allows.
    unsafe fn create_image(
        &self,
        kind: image::Kind,
        mip_levels: image::Level,
        format: format::Format,
        tiling: image::Tiling,
        usage: image::Usage,
        view_caps: image::ViewCapabilities,
    ) -> Result<r::Image, image::CreationError> {
        assert!(mip_levels <= kind.num_levels());

        let base_format = format.base_format();
        let format_desc = base_format.0.desc();
        let bytes_per_block = (format_desc.bits / 8) as _;
        let block_dim = format_desc.dim;
        let extent = kind.extent();

        // Pick the texture layout and the feature set matching the tiling.
        let format_info = self.format_properties.get(format as usize);
        let (layout, features) = match tiling {
            image::Tiling::Optimal => (
                d3d12::D3D12_TEXTURE_LAYOUT_UNKNOWN,
                format_info.properties.optimal_tiling,
            ),
            image::Tiling::Linear => (
                d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
                format_info.properties.linear_tiling,
            ),
        };
        // `sample_count_mask` has one bit per supported sample count.
        if format_info.sample_count_mask & kind.num_samples() == 0 {
            return Err(image::CreationError::Samples(kind.num_samples()));
        }

        let desc = d3d12::D3D12_RESOURCE_DESC {
            Dimension: match kind {
                image::Kind::D1(..) => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE1D,
                image::Kind::D2(..) => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D,
                image::Kind::D3(..) => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE3D,
            },
            Alignment: 0,
            Width: extent.width as _,
            Height: extent.height as _,
            // D3D12 overloads this field: depth for 3D textures, array size
            // for 1D/2D textures.
            DepthOrArraySize: if extent.depth > 1 {
                extent.depth as _
            } else {
                kind.num_layers() as _
            },
            MipLevels: mip_levels as _,
            Format: match conv::map_surface_type(base_format.0) {
                Some(format) => format,
                None => return Err(image::CreationError::Format(format)),
            },
            SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
                Count: kind.num_samples() as _,
                Quality: 0,
            },
            Layout: layout,
            Flags: conv::map_image_flags(usage, features),
        };

        // Ask the device for the placed-resource size and alignment.
        let alloc_info = self.raw.clone().GetResourceAllocationInfo(0, 1, &desc);

        // Image usages which require RT/DS heap due to internal implementation.
        let target_usage = image::Usage::COLOR_ATTACHMENT
            | image::Usage::DEPTH_STENCIL_ATTACHMENT
            | image::Usage::TRANSFER_DST;

        // Without heterogeneous heaps, images must be bound to the memory
        // group matching their usage (RT/DS vs. ordinary textures).
        let type_mask_shift = if self.private_caps.heterogeneous_resource_heaps {
            MEM_TYPE_UNIVERSAL_SHIFT
        } else if usage.intersects(target_usage) {
            MEM_TYPE_TARGET_SHIFT
        } else {
            MEM_TYPE_IMAGE_SHIFT
        };

        Ok(r::Image::Unbound(r::ImageUnbound {
            view_format: conv::map_format(format),
            dsv_format: conv::map_format_dsv(base_format.0),
            desc,
            requirements: memory::Requirements {
                size: alloc_info.SizeInBytes,
                alignment: alloc_info.Alignment,
                type_mask: MEM_TYPE_MASK << type_mask_shift,
            },
            format,
            kind,
            usage,
            tiling,
            view_caps,
            bytes_per_block,
            block_dim,
        }))
    }
2328
get_image_requirements(&self, image: &r::Image) -> Requirements2329 unsafe fn get_image_requirements(&self, image: &r::Image) -> Requirements {
2330 match image {
2331 r::Image::Bound(i) => i.requirements,
2332 r::Image::Unbound(i) => i.requirements,
2333 }
2334 }
2335
get_image_subresource_footprint( &self, image: &r::Image, sub: image::Subresource, ) -> image::SubresourceFootprint2336 unsafe fn get_image_subresource_footprint(
2337 &self,
2338 image: &r::Image,
2339 sub: image::Subresource,
2340 ) -> image::SubresourceFootprint {
2341 let mut num_rows = 0;
2342 let mut total_bytes = 0;
2343 let _desc = match image {
2344 r::Image::Bound(i) => i.descriptor,
2345 r::Image::Unbound(i) => i.desc,
2346 };
2347 let footprint = {
2348 let mut footprint = mem::zeroed();
2349 self.raw.GetCopyableFootprints(
2350 image.get_desc(),
2351 image.calc_subresource(sub.level as _, sub.layer as _, 0),
2352 1,
2353 0,
2354 &mut footprint,
2355 &mut num_rows,
2356 ptr::null_mut(), // row size in bytes
2357 &mut total_bytes,
2358 );
2359 footprint
2360 };
2361
2362 let depth_pitch = (footprint.Footprint.RowPitch * num_rows) as buffer::Offset;
2363 let array_pitch = footprint.Footprint.Depth as buffer::Offset * depth_pitch;
2364 image::SubresourceFootprint {
2365 slice: footprint.Offset .. footprint.Offset + total_bytes,
2366 row_pitch: footprint.Footprint.RowPitch as _,
2367 depth_pitch,
2368 array_pitch,
2369 }
2370 }
2371
    /// Binds a region of `memory` to an unbound image by creating a placed
    /// resource, and pre-creates per-layer clear views (RTV/DSV) where the
    /// image's usage and format allow clearing.
    ///
    /// Returns `WrongMemory` if the memory type is outside the image's
    /// supported mask, `OutOfBounds` if the region exceeds the allocation.
    /// On success, `*image` transitions from `Unbound` to `Bound`.
    unsafe fn bind_image_memory(
        &self,
        memory: &r::Memory,
        offset: u64,
        image: &mut r::Image,
    ) -> Result<(), d::BindError> {
        use self::image::Usage;

        // Panics if the image was already bound.
        let image_unbound = *image.expect_unbound();
        if image_unbound.requirements.type_mask & (1 << memory.type_id) == 0 {
            error!(
                "Bind memory failure: supported mask 0x{:x}, given id {}",
                image_unbound.requirements.type_mask, memory.type_id
            );
            return Err(d::BindError::WrongMemory);
        }
        if offset + image_unbound.requirements.size > memory.size {
            return Err(d::BindError::OutOfBounds);
        }

        let mut resource = native::Resource::null();
        let num_layers = image_unbound.kind.num_layers();

        // Creation failure is treated as a bug/invariant violation here.
        assert_eq!(
            winerror::S_OK,
            self.raw.clone().CreatePlacedResource(
                memory.heap.as_mut_ptr(),
                offset,
                &image_unbound.desc,
                d3d12::D3D12_RESOURCE_STATE_COMMON,
                ptr::null(),
                &d3d12::ID3D12Resource::uuidof(),
                resource.mut_void(),
            )
        );

        // Template for the clear views created below; format, aspects and
        // layer range are overridden per view.
        let info = ViewInfo {
            resource,
            kind: image_unbound.kind,
            caps: image::ViewCapabilities::empty(),
            view_kind: match image_unbound.kind {
                image::Kind::D1(..) => image::ViewKind::D1Array,
                image::Kind::D2(..) => image::ViewKind::D2Array,
                image::Kind::D3(..) => image::ViewKind::D3,
            },
            format: image_unbound.desc.Format,
            component_mapping: IDENTITY_MAPPING,
            range: image::SubresourceRange {
                aspects: Aspects::empty(),
                levels: 0 .. 0,
                layers: 0 .. 0,
            },
        };

        //TODO: the clear_Xv is incomplete. We should support clearing images created without XXX_ATTACHMENT usage.
        // for this, we need to check the format and force the `RENDER_TARGET` flag behind the user's back
        // if the format supports being rendered into, allowing us to create clear_Xv
        let format_properties = self
            .format_properties
            .get(image_unbound.format as usize)
            .properties;
        let props = match image_unbound.tiling {
            image::Tiling::Optimal => format_properties.optimal_tiling,
            image::Tiling::Linear => format_properties.linear_tiling,
        };
        // Clearing goes through RTV/DSV handles, so both the usage and the
        // format's attachment capability must allow it.
        let can_clear_color = image_unbound
            .usage
            .intersects(Usage::TRANSFER_DST | Usage::COLOR_ATTACHMENT)
            && props.contains(format::ImageFeature::COLOR_ATTACHMENT);
        let can_clear_depth = image_unbound
            .usage
            .intersects(Usage::TRANSFER_DST | Usage::DEPTH_STENCIL_ATTACHMENT)
            && props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT);
        let aspects = image_unbound.format.surface_desc().aspects;

        *image = r::Image::Bound(r::ImageBound {
            resource: resource,
            place: r::Place::Heap {
                raw: memory.heap.clone(),
                offset,
            },
            surface_type: image_unbound.format.base_format().0,
            kind: image_unbound.kind,
            usage: image_unbound.usage,
            default_view_format: image_unbound.view_format,
            view_caps: image_unbound.view_caps,
            descriptor: image_unbound.desc,
            bytes_per_block: image_unbound.bytes_per_block,
            block_dim: image_unbound.block_dim,
            // One RTV per array layer for color clears.
            clear_cv: if aspects.contains(Aspects::COLOR) && can_clear_color {
                let format = image_unbound.view_format.unwrap();
                (0 .. num_layers)
                    .map(|layer| {
                        self.view_image_as_render_target(ViewInfo {
                            format,
                            range: image::SubresourceRange {
                                aspects: Aspects::COLOR,
                                levels: 0 .. 1, //TODO?
                                layers: layer .. layer + 1,
                            },
                            ..info.clone()
                        })
                        .unwrap()
                    })
                    .collect()
            } else {
                Vec::new()
            },
            // One DSV per array layer for depth clears.
            clear_dv: if aspects.contains(Aspects::DEPTH) && can_clear_depth {
                let format = image_unbound.dsv_format.unwrap();
                (0 .. num_layers)
                    .map(|layer| {
                        self.view_image_as_depth_stencil(ViewInfo {
                            format,
                            range: image::SubresourceRange {
                                aspects: Aspects::DEPTH,
                                levels: 0 .. 1, //TODO?
                                layers: layer .. layer + 1,
                            },
                            ..info.clone()
                        })
                        .unwrap()
                    })
                    .collect()
            } else {
                Vec::new()
            },
            // One DSV per array layer for stencil clears; note this gates on
            // `can_clear_depth` since both aspects share the DSV format.
            clear_sv: if aspects.contains(Aspects::STENCIL) && can_clear_depth {
                let format = image_unbound.dsv_format.unwrap();
                (0 .. num_layers)
                    .map(|layer| {
                        self.view_image_as_depth_stencil(ViewInfo {
                            format,
                            range: image::SubresourceRange {
                                aspects: Aspects::STENCIL,
                                levels: 0 .. 1, //TODO?
                                layers: layer .. layer + 1,
                            },
                            ..info.clone()
                        })
                        .unwrap()
                    })
                    .collect()
            } else {
                Vec::new()
            },
            requirements: image_unbound.requirements,
        });

        Ok(())
    }
2523
    /// Creates an image view, eagerly building SRV/RTV/UAV/DSV descriptor
    /// handles for every capability the image's usage allows.
    ///
    /// Individual view creations are allowed to fail silently (the handle is
    /// left as `None`) because the user may never need that particular view.
    unsafe fn create_image_view(
        &self,
        image: &r::Image,
        view_kind: image::ViewKind,
        format: format::Format,
        swizzle: format::Swizzle,
        range: image::SubresourceRange,
    ) -> Result<r::ImageView, image::ViewError> {
        // Panics if the image is not bound to memory yet.
        let image = image.expect_bound();
        let is_array = image.kind.num_layers() > 1;
        let mip_levels = (range.levels.start, range.levels.end);
        let layers = (range.layers.start, range.layers.end);

        let info = ViewInfo {
            resource: image.resource,
            kind: image.kind,
            caps: image.view_caps,
            // D3D12 doesn't allow looking at a single slice of an array as a non-array
            view_kind: if is_array && view_kind == image::ViewKind::D2 {
                image::ViewKind::D2Array
            } else if is_array && view_kind == image::ViewKind::D1 {
                image::ViewKind::D1Array
            } else {
                view_kind
            },
            format: conv::map_format(format).ok_or(image::ViewError::BadFormat(format))?,
            component_mapping: conv::map_swizzle(swizzle),
            range,
        };

        //Note: we allow RTV/DSV/SRV/UAV views to fail to be created here,
        // because we don't know if the user will even need to use them.

        Ok(r::ImageView {
            resource: image.resource,
            handle_srv: if image
                .usage
                .intersects(image::Usage::SAMPLED | image::Usage::INPUT_ATTACHMENT)
            {
                self.view_image_as_shader_resource(info.clone()).ok()
            } else {
                None
            },
            handle_rtv: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) {
                self.view_image_as_render_target(info.clone()).ok()
            } else {
                None
            },
            handle_uav: if image.usage.contains(image::Usage::STORAGE) {
                self.view_image_as_storage(info.clone()).ok()
            } else {
                None
            },
            handle_dsv: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) {
                // Depth-stencil views need the dedicated DSV format variant.
                match conv::map_format_dsv(format.base_format().0) {
                    Some(dsv_format) => self
                        .view_image_as_depth_stencil(ViewInfo {
                            format: dsv_format,
                            ..info
                        })
                        .ok(),
                    None => None,
                }
            } else {
                None
            },
            // NOTE(review): this unwrap panics if the image has no default
            // view format — presumably guaranteed by image creation; confirm.
            dxgi_format: image.default_view_format.unwrap(),
            num_levels: image.descriptor.MipLevels as image::Level,
            mip_levels,
            layers,
            kind: info.kind,
        })
    }
2597
create_sampler( &self, info: &image::SamplerDesc, ) -> Result<r::Sampler, d::AllocationError>2598 unsafe fn create_sampler(
2599 &self,
2600 info: &image::SamplerDesc,
2601 ) -> Result<r::Sampler, d::AllocationError> {
2602 assert!(info.normalized);
2603 let handle = self.sampler_pool.lock().unwrap().alloc_handle();
2604
2605 let op = match info.comparison {
2606 Some(_) => d3d12::D3D12_FILTER_REDUCTION_TYPE_COMPARISON,
2607 None => d3d12::D3D12_FILTER_REDUCTION_TYPE_STANDARD,
2608 };
2609 self.raw.create_sampler(
2610 handle,
2611 conv::map_filter(
2612 info.mag_filter,
2613 info.min_filter,
2614 info.mip_filter,
2615 op,
2616 info.anisotropic,
2617 ),
2618 [
2619 conv::map_wrap(info.wrap_mode.0),
2620 conv::map_wrap(info.wrap_mode.1),
2621 conv::map_wrap(info.wrap_mode.2),
2622 ],
2623 info.lod_bias.0,
2624 match info.anisotropic {
2625 image::Anisotropic::On(max) => max as _, // TODO: check support here?
2626 image::Anisotropic::Off => 0,
2627 },
2628 conv::map_comparison(info.comparison.unwrap_or(pso::Comparison::Always)),
2629 info.border.into(),
2630 info.lod_range.start.0 .. info.lod_range.end.0,
2631 );
2632
2633 Ok(r::Sampler { handle })
2634 }
2635
    /// Creates a descriptor pool backed by slices of the global GPU
    /// descriptor heaps.
    ///
    /// The requested descriptor counts are summed per heap type
    /// (CBV/SRV/UAV vs. sampler), and a contiguous range of each global heap
    /// is reserved for the pool's own sub-allocator.
    unsafe fn create_descriptor_pool<I>(
        &self,
        max_sets: usize,
        descriptor_pools: I,
        _flags: pso::DescriptorPoolCreateFlags,
    ) -> Result<r::DescriptorPool, d::OutOfMemory>
    where
        I: IntoIterator,
        I::Item: Borrow<pso::DescriptorRangeDesc>,
    {
        // Descriptor pools are implemented as slices of the global descriptor heaps.
        // A descriptor pool will occupy a contiguous space in each heap (CBV/SRV/UAV and Sampler) depending
        // on the total requested amount of descriptors.

        let mut num_srv_cbv_uav = 0;
        let mut num_samplers = 0;

        let descriptor_pools = descriptor_pools
            .into_iter()
            .map(|desc| *desc.borrow())
            .collect::<Vec<_>>();

        info!("create_descriptor_pool with {} max sets", max_sets);
        // A descriptor type may map to several D3D12 descriptor kinds
        // (e.g. combined image-sampler = SRV + sampler), so count each
        // contained kind separately.
        for desc in &descriptor_pools {
            let content = r::DescriptorContent::from(desc.ty);
            debug!("\tcontent {:?}", content);
            if content.contains(r::DescriptorContent::CBV) {
                num_srv_cbv_uav += desc.count;
            }
            if content.contains(r::DescriptorContent::SRV) {
                num_srv_cbv_uav += desc.count;
            }
            if content.contains(r::DescriptorContent::UAV) {
                num_srv_cbv_uav += desc.count;
            }
            if content.contains(r::DescriptorContent::SAMPLER) {
                num_samplers += desc.count;
            }
        }

        info!(
            "total {} views and {} samplers",
            num_srv_cbv_uav, num_samplers
        );

        // Allocate slices of the global GPU descriptor heaps.
        let heap_srv_cbv_uav = {
            let mut heap_srv_cbv_uav = self.heap_srv_cbv_uav.lock().unwrap();

            // An empty range is used when the pool needs no view descriptors.
            let range = match num_srv_cbv_uav {
                0 => 0 .. 0,
                _ => heap_srv_cbv_uav
                    .range_allocator
                    .allocate_range(num_srv_cbv_uav as _)
                    .unwrap(), // TODO: error/resize
            };

            r::DescriptorHeapSlice {
                heap: heap_srv_cbv_uav.raw.clone(),
                handle_size: heap_srv_cbv_uav.handle_size as _,
                range_allocator: RangeAllocator::new(range),
                start: heap_srv_cbv_uav.start,
            }
        };

        let heap_sampler = {
            let mut heap_sampler = self.heap_sampler.lock().unwrap();

            let range = match num_samplers {
                0 => 0 .. 0,
                _ => heap_sampler
                    .range_allocator
                    .allocate_range(num_samplers as _)
                    .unwrap(), // TODO: error/resize
            };

            r::DescriptorHeapSlice {
                heap: heap_sampler.raw.clone(),
                handle_size: heap_sampler.handle_size as _,
                range_allocator: RangeAllocator::new(range),
                start: heap_sampler.start,
            }
        };

        Ok(r::DescriptorPool {
            heap_srv_cbv_uav,
            heap_sampler,
            pools: descriptor_pools,
            max_size: max_sets as _,
        })
    }
2727
create_descriptor_set_layout<I, J>( &self, bindings: I, _immutable_samplers: J, ) -> Result<r::DescriptorSetLayout, d::OutOfMemory> where I: IntoIterator, I::Item: Borrow<pso::DescriptorSetLayoutBinding>, J: IntoIterator, J::Item: Borrow<r::Sampler>,2728 unsafe fn create_descriptor_set_layout<I, J>(
2729 &self,
2730 bindings: I,
2731 _immutable_samplers: J,
2732 ) -> Result<r::DescriptorSetLayout, d::OutOfMemory>
2733 where
2734 I: IntoIterator,
2735 I::Item: Borrow<pso::DescriptorSetLayoutBinding>,
2736 J: IntoIterator,
2737 J::Item: Borrow<r::Sampler>,
2738 {
2739 Ok(r::DescriptorSetLayout {
2740 bindings: bindings.into_iter().map(|b| b.borrow().clone()).collect(),
2741 })
2742 }
2743
write_descriptor_sets<'a, I, J>(&self, write_iter: I) where I: IntoIterator<Item = pso::DescriptorSetWrite<'a, B, J>>, J: IntoIterator, J::Item: Borrow<pso::Descriptor<'a, B>>,2744 unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I)
2745 where
2746 I: IntoIterator<Item = pso::DescriptorSetWrite<'a, B, J>>,
2747 J: IntoIterator,
2748 J::Item: Borrow<pso::Descriptor<'a, B>>,
2749 {
2750 let mut descriptor_update_pools = self.descriptor_update_pools.lock().unwrap();
2751 let mut update_pool_index = 0;
2752
2753 //TODO: combine destination ranges
2754 let mut dst_samplers = Vec::new();
2755 let mut dst_views = Vec::new();
2756 let mut src_samplers = Vec::new();
2757 let mut src_views = Vec::new();
2758 let mut num_samplers = Vec::new();
2759 let mut num_views = Vec::new();
2760 debug!("write_descriptor_sets");
2761
2762 for write in write_iter {
2763 let mut offset = write.array_offset as u64;
2764 let mut target_binding = write.binding as usize;
2765 let mut bind_info = &write.set.binding_infos[target_binding];
2766 debug!(
2767 "\t{:?} binding {} array offset {}",
2768 bind_info, target_binding, offset
2769 );
2770 for descriptor in write.descriptors {
2771 // spill over the writes onto the next binding
2772 while offset >= bind_info.count {
2773 assert_eq!(offset, bind_info.count);
2774 target_binding += 1;
2775 bind_info = &write.set.binding_infos[target_binding];
2776 offset = 0;
2777 }
2778 let mut src_cbv = None;
2779 let mut src_srv = None;
2780 let mut src_uav = None;
2781 let mut src_sampler = None;
2782
2783 match *descriptor.borrow() {
2784 pso::Descriptor::Buffer(buffer, ref range) => {
2785 let buffer = buffer.expect_bound();
2786
2787 if bind_info.content.is_dynamic() {
2788 // Root Descriptor
2789 let buffer_offset = range.start.unwrap_or(0);
2790 let buffer_address = (*buffer.resource).GetGPUVirtualAddress();
2791
2792 // Descriptor sets need to be externally synchronized according to specification
2793 let dynamic_descriptors = &mut *bind_info.dynamic_descriptors.get();
2794 dynamic_descriptors[offset as usize].gpu_buffer_location = buffer_address + buffer_offset;
2795 } else {
2796 // Descriptor table
2797 if update_pool_index == descriptor_update_pools.len() {
2798 let max_size = 1u64 << 12; //arbitrary
2799 descriptor_update_pools.push(descriptors_cpu::HeapLinear::new(
2800 self.raw,
2801 native::DescriptorHeapType::CbvSrvUav,
2802 max_size as _,
2803 ));
2804 }
2805 let mut heap = descriptor_update_pools.last_mut().unwrap();
2806 let start = range.start.unwrap_or(0);
2807 let end = range.end.unwrap_or(buffer.requirements.size as _);
2808
2809 if bind_info.content.contains(r::DescriptorContent::CBV) {
2810 // Making the size field of buffer requirements for uniform
2811 // buffers a multiple of 256 and setting the required offset
2812 // alignment to 256 allows us to patch the size here.
2813 // We can always enforce the size to be aligned to 256 for
2814 // CBVs without going out-of-bounds.
2815 let size = ((end - start) + 255) & !255;
2816 let desc = d3d12::D3D12_CONSTANT_BUFFER_VIEW_DESC {
2817 BufferLocation: (*buffer.resource).GetGPUVirtualAddress() + start,
2818 SizeInBytes: size as _,
2819 };
2820 let handle = heap.alloc_handle();
2821 self.raw.CreateConstantBufferView(&desc, handle);
2822 src_cbv = Some(handle);
2823 }
2824 if bind_info.content.contains(r::DescriptorContent::SRV) {
2825 assert_eq!((end - start) % 4, 0);
2826 let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC {
2827 Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS,
2828 Shader4ComponentMapping: IDENTITY_MAPPING,
2829 ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER,
2830 u: mem::zeroed(),
2831 };
2832 *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV {
2833 FirstElement: start as _,
2834 NumElements: ((end - start) / 4) as _,
2835 StructureByteStride: 0,
2836 Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW,
2837 };
2838 let handle = heap.alloc_handle();
2839 self.raw.CreateShaderResourceView(
2840 buffer.resource.as_mut_ptr(),
2841 &desc,
2842 handle,
2843 );
2844 src_srv = Some(handle);
2845 }
2846 if bind_info.content.contains(r::DescriptorContent::UAV) {
2847 assert_eq!((end - start) % 4, 0);
2848 let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC {
2849 Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS,
2850 ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER,
2851 u: mem::zeroed(),
2852 };
2853 *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV {
2854 FirstElement: start as _,
2855 NumElements: ((end - start) / 4) as _,
2856 StructureByteStride: 0,
2857 CounterOffsetInBytes: 0,
2858 Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW,
2859 };
2860 if heap.is_full() {
2861 // pool is full, move to the next one
2862 update_pool_index += 1;
2863 let max_size = 1u64 << 12; //arbitrary
2864 descriptor_update_pools.push(descriptors_cpu::HeapLinear::new(
2865 self.raw,
2866 native::DescriptorHeapType::CbvSrvUav,
2867 max_size as _,
2868 ));
2869 heap = descriptor_update_pools.last_mut().unwrap();
2870 }
2871 let handle = heap.alloc_handle();
2872 self.raw.CreateUnorderedAccessView(
2873 buffer.resource.as_mut_ptr(),
2874 ptr::null_mut(),
2875 &desc,
2876 handle,
2877 );
2878 src_uav = Some(handle);
2879 }
2880
2881 // always leave this block of code prepared
2882 if heap.is_full() {
2883 // pool is full, move to the next one
2884 update_pool_index += 1;
2885 }
2886 }
2887 }
2888 pso::Descriptor::Image(image, _layout) => {
2889 if bind_info.content.contains(r::DescriptorContent::SRV) {
2890 src_srv = image.handle_srv;
2891 }
2892 if bind_info.content.contains(r::DescriptorContent::UAV) {
2893 src_uav = image.handle_uav;
2894 }
2895 }
2896 pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => {
2897 src_srv = image.handle_srv;
2898 src_sampler = Some(sampler.handle);
2899 }
2900 pso::Descriptor::Sampler(sampler) => {
2901 src_sampler = Some(sampler.handle);
2902 }
2903 pso::Descriptor::UniformTexelBuffer(buffer_view) => {
2904 let handle = buffer_view.handle_srv;
2905 src_srv = Some(handle);
2906 if handle.ptr == 0 {
2907 error!("SRV handle of the uniform texel buffer is zero (not supported by specified format).");
2908 }
2909 }
2910 pso::Descriptor::StorageTexelBuffer(buffer_view) => {
2911 if bind_info.content.contains(r::DescriptorContent::SRV) {
2912 let handle = buffer_view.handle_srv;
2913 src_srv = Some(handle);
2914 if handle.ptr == 0 {
2915 error!("SRV handle of the storage texel buffer is zero (not supported by specified format).");
2916 }
2917 }
2918 if bind_info.content.contains(r::DescriptorContent::UAV) {
2919 let handle = buffer_view.handle_uav;
2920 src_uav = Some(handle);
2921 if handle.ptr == 0 {
2922 error!("UAV handle of the storage texel buffer is zero (not supported by specified format).");
2923 }
2924 }
2925 }
2926 }
2927
2928 if let Some(handle) = src_cbv {
2929 trace!("\tcbv offset {}", offset);
2930 src_views.push(handle);
2931 dst_views.push(bind_info.view_range.as_ref().unwrap().at(offset));
2932 num_views.push(1);
2933 }
2934 if let Some(handle) = src_srv {
2935 trace!("\tsrv offset {}", offset);
2936 src_views.push(handle);
2937 dst_views.push(bind_info.view_range.as_ref().unwrap().at(offset));
2938 num_views.push(1);
2939 }
2940 if let Some(handle) = src_uav {
2941 let uav_offset = if bind_info.content.contains(r::DescriptorContent::SRV) {
2942 bind_info.count + offset
2943 } else {
2944 offset
2945 };
2946 trace!("\tuav offset {}", uav_offset);
2947 src_views.push(handle);
2948 dst_views.push(bind_info.view_range.as_ref().unwrap().at(uav_offset));
2949 num_views.push(1);
2950 }
2951 if let Some(handle) = src_sampler {
2952 trace!("\tsampler offset {}", offset);
2953 src_samplers.push(handle);
2954 dst_samplers.push(bind_info.sampler_range.as_ref().unwrap().at(offset));
2955 num_samplers.push(1);
2956 }
2957
2958 offset += 1;
2959 }
2960 }
2961
2962 if !num_views.is_empty() {
2963 self.raw.clone().CopyDescriptors(
2964 dst_views.len() as u32,
2965 dst_views.as_ptr(),
2966 num_views.as_ptr(),
2967 src_views.len() as u32,
2968 src_views.as_ptr(),
2969 num_views.as_ptr(),
2970 d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
2971 );
2972 }
2973 if !num_samplers.is_empty() {
2974 self.raw.clone().CopyDescriptors(
2975 dst_samplers.len() as u32,
2976 dst_samplers.as_ptr(),
2977 num_samplers.as_ptr(),
2978 src_samplers.len() as u32,
2979 src_samplers.as_ptr(),
2980 num_samplers.as_ptr(),
2981 d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
2982 );
2983 }
2984
2985 // reset the temporary CPU-size descriptor pools
2986 for buffer_desc_pool in descriptor_update_pools.iter_mut() {
2987 buffer_desc_pool.clear();
2988 }
2989 }
2990
    /// Copies descriptor ranges between descriptor sets on the CPU timeline.
    ///
    /// View (CBV/SRV/UAV) and sampler descriptors live in different D3D12
    /// heap types, so the copies are batched separately and flushed with at
    /// most two `CopyDescriptors` calls.
    unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I)
    where
        I: IntoIterator,
        I::Item: Borrow<pso::DescriptorSetCopy<'a, B>>,
    {
        // Batched source/destination handle ranges, one batch per heap type.
        let mut dst_samplers = Vec::new();
        let mut dst_views = Vec::new();
        let mut src_samplers = Vec::new();
        let mut src_views = Vec::new();
        let mut num_samplers = Vec::new();
        let mut num_views = Vec::new();

        for copy_wrap in copy_iter {
            let copy = copy_wrap.borrow();
            let src_info = &copy.src_set.binding_infos[copy.src_binding as usize];
            let dst_info = &copy.dst_set.binding_infos[copy.dst_binding as usize];
            // Copy the primary view range when both bindings have one.
            if let (Some(src_range), Some(dst_range)) =
                (src_info.view_range.as_ref(), dst_info.view_range.as_ref())
            {
                assert!(copy.src_array_offset + copy.count <= src_range.count as usize);
                assert!(copy.dst_array_offset + copy.count <= dst_range.count as usize);
                src_views.push(src_range.at(copy.src_array_offset as _));
                dst_views.push(dst_range.at(copy.dst_array_offset as _));
                num_views.push(copy.count as u32);

                // Bindings holding both SRV and UAV store the UAVs in a second
                // sub-range offset by the binding's `count`; copy that one too.
                if (src_info.content & dst_info.content)
                    .contains(r::DescriptorContent::SRV | r::DescriptorContent::UAV)
                {
                    assert!(
                        src_info.count as usize + copy.src_array_offset + copy.count
                            <= src_range.count as usize
                    );
                    assert!(
                        dst_info.count as usize + copy.dst_array_offset + copy.count
                            <= dst_range.count as usize
                    );
                    src_views.push(src_range.at(src_info.count + copy.src_array_offset as u64));
                    dst_views.push(dst_range.at(dst_info.count + copy.dst_array_offset as u64));
                    num_views.push(copy.count as u32);
                }
            }
            // Copy the sampler range when both bindings have one.
            if let (Some(src_range), Some(dst_range)) = (
                src_info.sampler_range.as_ref(),
                dst_info.sampler_range.as_ref(),
            ) {
                assert!(copy.src_array_offset + copy.count <= src_range.count as usize);
                assert!(copy.dst_array_offset + copy.count <= dst_range.count as usize);
                src_samplers.push(src_range.at(copy.src_array_offset as _));
                dst_samplers.push(dst_range.at(copy.dst_array_offset as _));
                num_samplers.push(copy.count as u32);
            }
        }

        // Flush batched copies, one call per descriptor heap type.
        if !num_views.is_empty() {
            self.raw.clone().CopyDescriptors(
                dst_views.len() as u32,
                dst_views.as_ptr(),
                num_views.as_ptr(),
                src_views.len() as u32,
                src_views.as_ptr(),
                num_views.as_ptr(),
                d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
            );
        }
        if !num_samplers.is_empty() {
            self.raw.clone().CopyDescriptors(
                dst_samplers.len() as u32,
                dst_samplers.as_ptr(),
                num_samplers.as_ptr(),
                src_samplers.len() as u32,
                src_samplers.as_ptr(),
                num_samplers.as_ptr(),
                d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
            );
        }
    }
3067
map_memory<R>(&self, memory: &r::Memory, range: R) -> Result<*mut u8, d::MapError> where R: RangeArg<u64>,3068 unsafe fn map_memory<R>(&self, memory: &r::Memory, range: R) -> Result<*mut u8, d::MapError>
3069 where
3070 R: RangeArg<u64>,
3071 {
3072 if let Some(mem) = memory.resource {
3073 let start = range.start().unwrap_or(&0);
3074 let end = range.end().unwrap_or(&memory.size);
3075 assert!(start <= end);
3076
3077 let mut ptr = ptr::null_mut();
3078 assert_eq!(
3079 winerror::S_OK,
3080 (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, &mut ptr)
3081 );
3082 ptr = ptr.offset(*start as _);
3083 Ok(ptr as *mut _)
3084 } else {
3085 panic!("Memory not created with a memory type exposing `CPU_VISIBLE`.")
3086 }
3087 }
3088
unmap_memory(&self, memory: &r::Memory)3089 unsafe fn unmap_memory(&self, memory: &r::Memory) {
3090 if let Some(mem) = memory.resource {
3091 (*mem).Unmap(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 });
3092 }
3093 }
3094
    /// Flushes CPU writes in the given memory ranges to the device.
    ///
    /// D3D12 has no standalone flush API; instead the range is declared as
    /// the "written range" of an `Unmap` call on a transient mapping.
    unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), d::OutOfMemory>
    where
        I: IntoIterator,
        I::Item: Borrow<(&'a r::Memory, R)>,
        R: RangeArg<u64>,
    {
        for range in ranges {
            let &(ref memory, ref range) = range.borrow();
            // Memory without a mappable resource has nothing to flush.
            if let Some(mem) = memory.resource {
                // map and immediately unmap, hoping that dx12 drivers internally cache
                // currently mapped buffers.
                assert_eq!(
                    winerror::S_OK,
                    (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, ptr::null_mut())
                );

                let start = *range.start().unwrap_or(&0);
                let end = *range.end().unwrap_or(&memory.size); // TODO: only need to be end of current mapping

                // The written range passed to Unmap is what gets flushed.
                (*mem).Unmap(
                    0,
                    &d3d12::D3D12_RANGE {
                        Begin: start as _,
                        End: end as _,
                    },
                );
            }
        }

        Ok(())
    }
3126
    /// Makes device writes in the given memory ranges visible to the CPU.
    ///
    /// The range to invalidate is declared as the "read range" of a transient
    /// `Map` call; the matching `Unmap` declares that nothing was written.
    unsafe fn invalidate_mapped_memory_ranges<'a, I, R>(
        &self,
        ranges: I,
    ) -> Result<(), d::OutOfMemory>
    where
        I: IntoIterator,
        I::Item: Borrow<(&'a r::Memory, R)>,
        R: RangeArg<u64>,
    {
        for range in ranges {
            let &(ref memory, ref range) = range.borrow();
            // Memory without a mappable resource has nothing to invalidate.
            if let Some(mem) = memory.resource {
                let start = *range.start().unwrap_or(&0);
                let end = *range.end().unwrap_or(&memory.size); // TODO: only need to be end of current mapping

                // map and immediately unmap, hoping that dx12 drivers internally cache
                // currently mapped buffers.
                assert_eq!(
                    winerror::S_OK,
                    (*mem).Map(
                        0,
                        &d3d12::D3D12_RANGE {
                            Begin: start as _,
                            End: end as _,
                        },
                        ptr::null_mut(),
                    )
                );

                // Empty written range: invalidation only, nothing was modified.
                (*mem).Unmap(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 });
            }
        }

        Ok(())
    }
3162
create_semaphore(&self) -> Result<r::Semaphore, d::OutOfMemory>3163 fn create_semaphore(&self) -> Result<r::Semaphore, d::OutOfMemory> {
3164 let fence = self.create_fence(false)?;
3165 Ok(r::Semaphore { raw: fence.raw })
3166 }
3167
create_fence(&self, signalled: bool) -> Result<r::Fence, d::OutOfMemory>3168 fn create_fence(&self, signalled: bool) -> Result<r::Fence, d::OutOfMemory> {
3169 Ok(r::Fence {
3170 raw: self.create_raw_fence(signalled),
3171 })
3172 }
3173
reset_fence(&self, fence: &r::Fence) -> Result<(), d::OutOfMemory>3174 unsafe fn reset_fence(&self, fence: &r::Fence) -> Result<(), d::OutOfMemory> {
3175 assert_eq!(winerror::S_OK, fence.raw.signal(0));
3176 Ok(())
3177 }
3178
    /// Waits for one (`Any`) or all (`All`) of the fences to signal, with a
    /// timeout given in nanoseconds.
    ///
    /// Returns `Ok(true)` when the wait condition was satisfied and
    /// `Ok(false)` on timeout.
    unsafe fn wait_for_fences<I>(
        &self,
        fences: I,
        wait: d::WaitFor,
        timeout_ns: u64,
    ) -> Result<bool, d::OomOrDeviceLost>
    where
        I: IntoIterator,
        I::Item: Borrow<r::Fence>,
    {
        let fences = fences.into_iter().collect::<Vec<_>>();
        // Lazily grow the device's shared event pool so there is one
        // Win32 event per fence being waited on.
        let mut events = self.events.lock().unwrap();
        for _ in events.len() .. fences.len() {
            events.push(native::Event::create(false, false));
        }

        // Arm each event to fire once its fence reaches value 1.
        for (&event, fence) in events.iter().zip(fences.iter()) {
            synchapi::ResetEvent(event.0);
            assert_eq!(
                winerror::S_OK,
                fence.borrow().raw.set_event_on_completion(event, 1)
            );
        }

        let all = match wait {
            d::WaitFor::Any => FALSE,
            d::WaitFor::All => TRUE,
        };

        let hr = {
            // This block handles overflow when converting to u32 and always rounds up
            // The Vulkan specification allows to wait more than specified
            let timeout_ms = {
                if timeout_ns > (<u32>::max_value() as u64) * 1_000_000 {
                    <u32>::max_value()
                } else {
                    ((timeout_ns + 999_999) / 1_000_000) as u32
                }
            };

            synchapi::WaitForMultipleObjects(
                fences.len() as u32,
                events.as_ptr() as *const _,
                all,
                timeout_ms,
            )
        };

        // WaitForMultipleObjects encodes the signalled handle's index into
        // the return value, so match on whole index ranges.
        const WAIT_OBJECT_LAST: u32 = winbase::WAIT_OBJECT_0 + winnt::MAXIMUM_WAIT_OBJECTS;
        const WAIT_ABANDONED_LAST: u32 = winbase::WAIT_ABANDONED_0 + winnt::MAXIMUM_WAIT_OBJECTS;
        match hr {
            winbase::WAIT_OBJECT_0 ..= WAIT_OBJECT_LAST => Ok(true),
            winbase::WAIT_ABANDONED_0 ..= WAIT_ABANDONED_LAST => Ok(true), //TODO?
            winerror::WAIT_TIMEOUT => Ok(false),
            _ => panic!("Unexpected wait status 0x{:X}", hr),
        }
    }
3236
get_fence_status(&self, fence: &r::Fence) -> Result<bool, d::DeviceLost>3237 unsafe fn get_fence_status(&self, fence: &r::Fence) -> Result<bool, d::DeviceLost> {
3238 match fence.raw.GetCompletedValue() {
3239 0 => Ok(false),
3240 1 => Ok(true),
3241 _ => Err(d::DeviceLost),
3242 }
3243 }
3244
    /// Events are not implemented for the DX12 backend.
    fn create_event(&self) -> Result<(), d::OutOfMemory> {
        unimplemented!()
    }
3248
    /// Events are not implemented for the DX12 backend.
    unsafe fn get_event_status(&self, _event: &()) -> Result<bool, d::OomOrDeviceLost> {
        unimplemented!()
    }
3252
    /// Events are not implemented for the DX12 backend.
    unsafe fn set_event(&self, _event: &()) -> Result<(), d::OutOfMemory> {
        unimplemented!()
    }
3256
    /// Events are not implemented for the DX12 backend.
    unsafe fn reset_event(&self, _event: &()) -> Result<(), d::OutOfMemory> {
        unimplemented!()
    }
3260
free_memory(&self, memory: r::Memory)3261 unsafe fn free_memory(&self, memory: r::Memory) {
3262 memory.heap.destroy();
3263 if let Some(buffer) = memory.resource {
3264 buffer.destroy();
3265 }
3266 }
3267
create_query_pool( &self, query_ty: query::Type, count: query::Id, ) -> Result<r::QueryPool, query::CreationError>3268 unsafe fn create_query_pool(
3269 &self,
3270 query_ty: query::Type,
3271 count: query::Id,
3272 ) -> Result<r::QueryPool, query::CreationError> {
3273 let heap_ty = match query_ty {
3274 query::Type::Occlusion => native::QueryHeapType::Occlusion,
3275 query::Type::PipelineStatistics(_) => native::QueryHeapType::PipelineStatistics,
3276 query::Type::Timestamp => native::QueryHeapType::Timestamp,
3277 };
3278
3279 let (query_heap, hr) = self.raw.create_query_heap(heap_ty, count, 0);
3280 assert_eq!(winerror::S_OK, hr);
3281
3282 Ok(r::QueryPool {
3283 raw: query_heap,
3284 ty: heap_ty,
3285 })
3286 }
3287
    /// Releases the underlying query heap.
    unsafe fn destroy_query_pool(&self, pool: r::QueryPool) {
        pool.raw.destroy();
    }
3291
    /// Reading query results back on the CPU is not implemented for the
    /// DX12 backend; use `copy_query_pool_results` on a command buffer.
    unsafe fn get_query_pool_results(
        &self,
        _pool: &r::QueryPool,
        _queries: Range<query::Id>,
        _data: &mut [u8],
        _stride: buffer::Offset,
        _flags: query::ResultFlags,
    ) -> Result<bool, d::OomOrDeviceLost> {
        unimplemented!()
    }
3302
destroy_shader_module(&self, shader_lib: r::ShaderModule)3303 unsafe fn destroy_shader_module(&self, shader_lib: r::ShaderModule) {
3304 if let r::ShaderModule::Compiled(shaders) = shader_lib {
3305 for (_, blob) in shaders {
3306 blob.destroy();
3307 }
3308 }
3309 }
3310
    /// Render passes own no native objects; dropping is sufficient.
    unsafe fn destroy_render_pass(&self, _rp: r::RenderPass) {
        // Just drop
    }
3314
    /// Releases the root signature backing this pipeline layout.
    unsafe fn destroy_pipeline_layout(&self, layout: r::PipelineLayout) {
        layout.raw.destroy();
    }
3318
    /// Releases the native pipeline state object.
    unsafe fn destroy_graphics_pipeline(&self, pipeline: r::GraphicsPipeline) {
        pipeline.raw.destroy();
    }
3322
    /// Releases the native pipeline state object.
    unsafe fn destroy_compute_pipeline(&self, pipeline: r::ComputePipeline) {
        pipeline.raw.destroy();
    }
3326
    /// Framebuffers own no native objects; dropping is sufficient.
    unsafe fn destroy_framebuffer(&self, _fb: r::Framebuffer) {
        // Just drop
    }
3330
destroy_buffer(&self, buffer: r::Buffer)3331 unsafe fn destroy_buffer(&self, buffer: r::Buffer) {
3332 match buffer {
3333 r::Buffer::Bound(buffer) => {
3334 buffer.resource.destroy();
3335 }
3336 r::Buffer::Unbound(_) => {}
3337 }
3338 }
3339
    /// Buffer views own no native objects; dropping is sufficient.
    unsafe fn destroy_buffer_view(&self, _view: r::BufferView) {
        // empty
    }
3343
destroy_image(&self, image: r::Image)3344 unsafe fn destroy_image(&self, image: r::Image) {
3345 match image {
3346 r::Image::Bound(image) => {
3347 image.resource.destroy();
3348 }
3349 r::Image::Unbound(_) => {}
3350 }
3351 }
3352
    /// Image views own no native objects; dropping is sufficient.
    unsafe fn destroy_image_view(&self, _view: r::ImageView) {
        // Just drop
    }
3356
    /// Samplers own no native objects; dropping is sufficient.
    unsafe fn destroy_sampler(&self, _sampler: r::Sampler) {
        // Just drop
    }
3360
    /// Descriptor pools need no explicit teardown on this backend.
    unsafe fn destroy_descriptor_pool(&self, _pool: r::DescriptorPool) {
        // Just drop
        // Allocated descriptor sets don't need to be freed beforehand.
    }
3365
    /// Descriptor set layouts own no native objects; dropping is sufficient.
    unsafe fn destroy_descriptor_set_layout(&self, _layout: r::DescriptorSetLayout) {
        // Just drop
    }
3369
    /// Releases the underlying ID3D12Fence.
    unsafe fn destroy_fence(&self, fence: r::Fence) {
        fence.raw.destroy();
    }
3373
    /// Releases the fence backing this semaphore.
    unsafe fn destroy_semaphore(&self, semaphore: r::Semaphore) {
        semaphore.raw.destroy();
    }
3377
    /// Events are not implemented for the DX12 backend.
    unsafe fn destroy_event(&self, _event: ()) {
        unimplemented!()
    }
3381
    /// Creates a swapchain for `surface`, wrapping each backbuffer in a
    /// bound `r::Image`; destroys `old_swapchain` first, if provided.
    unsafe fn create_swapchain(
        &self,
        surface: &mut Surface,
        config: w::SwapchainConfig,
        old_swapchain: Option<Swapchain>,
    ) -> Result<(Swapchain, Vec<r::Image>), w::CreationError> {
        if let Some(old_swapchain) = old_swapchain {
            self.destroy_swapchain(old_swapchain);
        }

        let (swap_chain3, non_srgb_format) =
            self.create_swapchain_impl(&config, surface.wnd_handle, surface.factory)?;

        let swapchain = self.wrap_swapchain(swap_chain3, &config);

        let mut images = Vec::with_capacity(config.image_count as usize);
        for (i, &resource) in swapchain.resources.iter().enumerate() {
            // Pre-created RTV for clearing/rendering into this backbuffer.
            let rtv_handle = swapchain.rtv_heap.at(i as _, 0).cpu;
            let surface_type = config.format.base_format().0;
            let format_desc = surface_type.desc();

            let bytes_per_block = (format_desc.bits / 8) as _;
            let block_dim = format_desc.dim;
            let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1);

            // Describe the backbuffer as an already-bound 2D image so the
            // rest of the backend can treat it like any other image.
            images.push(r::Image::Bound(r::ImageBound {
                resource,
                place: r::Place::SwapChain,
                surface_type,
                kind,
                usage: config.image_usage,
                default_view_format: Some(non_srgb_format),
                view_caps: image::ViewCapabilities::empty(),
                descriptor: d3d12::D3D12_RESOURCE_DESC {
                    Dimension: d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D,
                    Alignment: 0,
                    Width: config.extent.width as _,
                    Height: config.extent.height as _,
                    DepthOrArraySize: 1,
                    MipLevels: 1,
                    Format: non_srgb_format,
                    SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
                        Count: 1,
                        Quality: 0,
                    },
                    Layout: d3d12::D3D12_TEXTURE_LAYOUT_UNKNOWN,
                    Flags: 0,
                },
                bytes_per_block,
                block_dim,
                clear_cv: vec![rtv_handle],
                clear_dv: Vec::new(),
                clear_sv: Vec::new(),
                // Dummy values, image is already bound
                requirements: memory::Requirements {
                    alignment: 1,
                    size: 1,
                    type_mask: MEM_TYPE_MASK,
                },
            }));
        }

        Ok((swapchain, images))
    }
3446
destroy_swapchain(&self, swapchain: Swapchain)3447 unsafe fn destroy_swapchain(&self, swapchain: Swapchain) {
3448 let inner = swapchain.release_resources();
3449 inner.destroy();
3450 }
3451
wait_idle(&self) -> Result<(), d::OutOfMemory>3452 fn wait_idle(&self) -> Result<(), d::OutOfMemory> {
3453 for queue in &self.queues {
3454 queue.wait_idle()?;
3455 }
3456 Ok(())
3457 }
3458
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_image_name(&self, _image: &mut r::Image, _name: &str) {
        // TODO
    }
3462
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_buffer_name(&self, _buffer: &mut r::Buffer, _name: &str) {
        // TODO
    }
3466
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_command_buffer_name(
        &self,
        _command_buffer: &mut cmd::CommandBuffer,
        _name: &str
    ) {
        // TODO
    }
3474
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_semaphore_name(&self, _semaphore: &mut r::Semaphore, _name: &str) {
        // TODO
    }
3478
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_fence_name(&self, _fence: &mut r::Fence, _name: &str) {
        // TODO
    }
3482
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_framebuffer_name(&self, _framebuffer: &mut r::Framebuffer, _name: &str) {
        // TODO
    }
3486
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_render_pass_name(&self, _render_pass: &mut r::RenderPass, _name: &str) {
        // TODO
    }
3490
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut r::DescriptorSet, _name: &str) {
        // TODO
    }
3494
    /// Debug naming is not yet implemented for this backend.
    unsafe fn set_descriptor_set_layout_name(
        &self,
        _descriptor_set_layout: &mut r::DescriptorSetLayout,
        _name: &str,
    ) {
        // TODO
    }
3502 }
3503
#[test]
fn test_identity_mapping() {
    // `Swizzle::NO` must translate to D3D12's default component mapping,
    // which `IDENTITY_MAPPING` hard-codes.
    let mapped = conv::map_swizzle(format::Swizzle::NO);
    assert_eq!(IDENTITY_MAPPING, mapped);
}
3508