1 /*!
2 # DirectX12 API internals.
3
Generally the mapping is straightforward.
5
6 ## Resource transitions
7
8 D3D12 API matches WebGPU internal states very well. The only
9 caveat here is issuing a special UAV barrier whenever both source
10 and destination states match, and they are for storage sync.
11
12 ## Memory
13
14 For now, all resources are created with "committed" memory.
15
16 ## Resource binding
17
See [`Device::create_pipeline_layout`] documentation for the structure
19 of the root signature corresponding to WebGPU pipeline layout.
20
Binding groups are mostly straightforward, with one big caveat:
22 all bindings have to be reset whenever the pipeline layout changes.
23 This is the rule of D3D12, and we can do nothing to help it.
24
25 We detect this change at both [`crate::CommandEncoder::set_bind_group`]
26 and [`crate::CommandEncoder::set_render_pipeline`] with
27 [`crate::CommandEncoder::set_compute_pipeline`].
28
For this reason, in order to avoid repeating the binding code,
30 we are binding everything in [`CommandEncoder::update_root_elements`].
31 When the pipeline layout is changed, we reset all bindings.
32 Otherwise, we pass a range corresponding only to the current bind group.
33
34 !*/
35
36 mod adapter;
37 mod command;
38 mod conv;
39 mod descriptor;
40 mod device;
41 mod instance;
42 mod view;
43
44 use arrayvec::ArrayVec;
45 use parking_lot::Mutex;
46 use std::{borrow::Cow, ffi, mem, num::NonZeroU32, ptr, sync::Arc};
47 use winapi::{
48 shared::{dxgi, dxgi1_2, dxgi1_4, dxgitype, windef, winerror},
49 um::{d3d12, synchapi, winbase, winnt},
50 Interface as _,
51 };
52
53 #[derive(Clone)]
54 pub struct Api;
55
56 impl crate::Api for Api {
57 type Instance = Instance;
58 type Surface = Surface;
59 type Adapter = Adapter;
60 type Device = Device;
61
62 type Queue = Queue;
63 type CommandEncoder = CommandEncoder;
64 type CommandBuffer = CommandBuffer;
65
66 type Buffer = Buffer;
67 type Texture = Texture;
68 type SurfaceTexture = Texture;
69 type TextureView = TextureView;
70 type Sampler = Sampler;
71 type QuerySet = QuerySet;
72 type Fence = Fence;
73
74 type BindGroupLayout = BindGroupLayout;
75 type BindGroup = BindGroup;
76 type PipelineLayout = PipelineLayout;
77 type ShaderModule = ShaderModule;
78 type RenderPipeline = RenderPipeline;
79 type ComputePipeline = ComputePipeline;
80 }
81
82 trait HResult<O> {
into_result(self) -> Result<O, Cow<'static, str>>83 fn into_result(self) -> Result<O, Cow<'static, str>>;
into_device_result(self, description: &str) -> Result<O, crate::DeviceError>84 fn into_device_result(self, description: &str) -> Result<O, crate::DeviceError>;
85 }
86 impl HResult<()> for i32 {
into_result(self) -> Result<(), Cow<'static, str>>87 fn into_result(self) -> Result<(), Cow<'static, str>> {
88 if self >= 0 {
89 return Ok(());
90 }
91 let description = match self {
92 winerror::E_UNEXPECTED => "unexpected",
93 winerror::E_NOTIMPL => "not implemented",
94 winerror::E_OUTOFMEMORY => "out of memory",
95 winerror::E_INVALIDARG => "invalid argument",
96 _ => return Err(Cow::Owned(format!("0x{:X}", self as u32))),
97 };
98 Err(Cow::Borrowed(description))
99 }
into_device_result(self, description: &str) -> Result<(), crate::DeviceError>100 fn into_device_result(self, description: &str) -> Result<(), crate::DeviceError> {
101 self.into_result().map_err(|err| {
102 log::error!("{} failed: {}", description, err);
103 if self == winerror::E_OUTOFMEMORY {
104 crate::DeviceError::OutOfMemory
105 } else {
106 crate::DeviceError::Lost
107 }
108 })
109 }
110 }
111
112 impl<T> HResult<T> for (T, i32) {
into_result(self) -> Result<T, Cow<'static, str>>113 fn into_result(self) -> Result<T, Cow<'static, str>> {
114 self.1.into_result().map(|()| self.0)
115 }
into_device_result(self, description: &str) -> Result<T, crate::DeviceError>116 fn into_device_result(self, description: &str) -> Result<T, crate::DeviceError> {
117 self.1.into_device_result(description).map(|()| self.0)
118 }
119 }
120
121 // Limited by D3D12's root signature size of 64. Each element takes 1 or 2 entries.
122 const MAX_ROOT_ELEMENTS: usize = 64;
123 const ZERO_BUFFER_SIZE: wgt::BufferAddress = 256 << 10;
124
125 pub struct Instance {
126 factory: native::Factory4,
127 library: Arc<native::D3D12Lib>,
128 _lib_dxgi: native::DxgiLib,
129 flags: crate::InstanceFlags,
130 }
131
132 unsafe impl Send for Instance {}
133 unsafe impl Sync for Instance {}
134
135 struct SwapChain {
136 raw: native::WeakPtr<dxgi1_4::IDXGISwapChain3>,
137 // need to associate raw image pointers with the swapchain so they can be properly released
138 // when the swapchain is destroyed
139 resources: Vec<native::Resource>,
140 waitable: winnt::HANDLE,
141 acquired_count: usize,
142 present_mode: wgt::PresentMode,
143 format: wgt::TextureFormat,
144 size: wgt::Extent3d,
145 }
146
147 pub struct Surface {
148 factory: native::WeakPtr<dxgi1_4::IDXGIFactory4>,
149 wnd_handle: windef::HWND,
150 swap_chain: Option<SwapChain>,
151 }
152
153 unsafe impl Send for Surface {}
154 unsafe impl Sync for Surface {}
155
156 #[derive(Debug, Clone, Copy)]
157 enum MemoryArchitecture {
158 Unified {
159 #[allow(unused)]
160 cache_coherent: bool,
161 },
162 NonUnified,
163 }
164
165 #[derive(Debug, Clone, Copy)]
166 struct PrivateCapabilities {
167 instance_flags: crate::InstanceFlags,
168 #[allow(unused)]
169 heterogeneous_resource_heaps: bool,
170 memory_architecture: MemoryArchitecture,
171 heap_create_not_zeroed: bool,
172 }
173
174 #[derive(Default)]
175 struct Workarounds {
176 // On WARP, temporary CPU descriptors are still used by the runtime
177 // after we call `CopyDescriptors`.
178 avoid_cpu_descriptor_overwrites: bool,
179 }
180
181 pub struct Adapter {
182 raw: native::WeakPtr<dxgi1_2::IDXGIAdapter2>,
183 device: native::Device,
184 library: Arc<native::D3D12Lib>,
185 private_caps: PrivateCapabilities,
186 //Note: this isn't used right now, but we'll need it later.
187 #[allow(unused)]
188 workarounds: Workarounds,
189 }
190
191 unsafe impl Send for Adapter {}
192 unsafe impl Sync for Adapter {}
193
194 /// Helper structure for waiting for GPU.
195 struct Idler {
196 fence: native::Fence,
197 event: native::Event,
198 }
199
200 impl Idler {
destroy(self)201 unsafe fn destroy(self) {
202 self.fence.destroy();
203 }
204 }
205
206 struct CommandSignatures {
207 draw: native::CommandSignature,
208 draw_indexed: native::CommandSignature,
209 dispatch: native::CommandSignature,
210 }
211
212 impl CommandSignatures {
destroy(&self)213 unsafe fn destroy(&self) {
214 self.draw.destroy();
215 self.draw_indexed.destroy();
216 self.dispatch.destroy();
217 }
218 }
219
220 struct DeviceShared {
221 features: wgt::Features,
222 zero_buffer: native::Resource,
223 cmd_signatures: CommandSignatures,
224 heap_views: descriptor::GeneralHeap,
225 heap_samplers: descriptor::GeneralHeap,
226 }
227
228 impl DeviceShared {
destroy(&self)229 unsafe fn destroy(&self) {
230 self.zero_buffer.destroy();
231 self.cmd_signatures.destroy();
232 self.heap_views.raw.destroy();
233 self.heap_samplers.raw.destroy();
234 }
235 }
236
237 pub struct Device {
238 raw: native::Device,
239 present_queue: native::CommandQueue,
240 idler: Idler,
241 private_caps: PrivateCapabilities,
242 shared: Arc<DeviceShared>,
243 // CPU only pools
244 rtv_pool: Mutex<descriptor::CpuPool>,
245 dsv_pool: Mutex<descriptor::CpuPool>,
246 srv_uav_pool: Mutex<descriptor::CpuPool>,
247 sampler_pool: Mutex<descriptor::CpuPool>,
248 // library
249 library: Arc<native::D3D12Lib>,
250 #[cfg(feature = "renderdoc")]
251 render_doc: crate::auxil::renderdoc::RenderDoc,
252 }
253
254 unsafe impl Send for Device {}
255 unsafe impl Sync for Device {}
256
257 pub struct Queue {
258 raw: native::CommandQueue,
259 temp_lists: Vec<native::CommandList>,
260 }
261
262 unsafe impl Send for Queue {}
263 unsafe impl Sync for Queue {}
264
265 #[derive(Default)]
266 struct Temp {
267 marker: Vec<u16>,
268 barriers: Vec<d3d12::D3D12_RESOURCE_BARRIER>,
269 }
270
271 impl Temp {
clear(&mut self)272 fn clear(&mut self) {
273 self.marker.clear();
274 self.barriers.clear();
275 }
276 }
277
278 struct PassResolve {
279 src: (native::Resource, u32),
280 dst: (native::Resource, u32),
281 format: native::Format,
282 }
283
284 #[derive(Clone, Copy)]
285 enum RootElement {
286 Empty,
287 SpecialConstantBuffer {
288 base_vertex: i32,
289 base_instance: u32,
290 other: u32,
291 },
292 /// Descriptor table.
293 Table(native::GpuDescriptor),
294 /// Descriptor for a buffer that has dynamic offset.
295 DynamicOffsetBuffer {
296 kind: BufferViewKind,
297 address: native::GpuAddress,
298 },
299 }
300
301 #[derive(Clone, Copy)]
302 enum PassKind {
303 Render,
304 Compute,
305 Transfer,
306 }
307
308 struct PassState {
309 has_label: bool,
310 resolves: ArrayVec<PassResolve, { crate::MAX_COLOR_TARGETS }>,
311 layout: PipelineLayoutShared,
312 root_elements: [RootElement; MAX_ROOT_ELEMENTS],
313 dirty_root_elements: u64,
314 vertex_buffers: [d3d12::D3D12_VERTEX_BUFFER_VIEW; crate::MAX_VERTEX_BUFFERS],
315 dirty_vertex_buffers: usize,
316 kind: PassKind,
317 }
318
319 #[test]
test_dirty_mask()320 fn test_dirty_mask() {
321 assert_eq!(MAX_ROOT_ELEMENTS, std::mem::size_of::<u64>() * 8);
322 }
323
324 impl PassState {
new() -> Self325 fn new() -> Self {
326 PassState {
327 has_label: false,
328 resolves: ArrayVec::new(),
329 layout: PipelineLayoutShared {
330 signature: native::RootSignature::null(),
331 total_root_elements: 0,
332 special_constants_root_index: None,
333 },
334 root_elements: [RootElement::Empty; MAX_ROOT_ELEMENTS],
335 dirty_root_elements: 0,
336 vertex_buffers: [unsafe { mem::zeroed() }; crate::MAX_VERTEX_BUFFERS],
337 dirty_vertex_buffers: 0,
338 kind: PassKind::Transfer,
339 }
340 }
341
clear(&mut self)342 fn clear(&mut self) {
343 // careful about heap allocations!
344 *self = Self::new();
345 }
346 }
347
348 pub struct CommandEncoder {
349 allocator: native::CommandAllocator,
350 device: native::Device,
351 shared: Arc<DeviceShared>,
352 list: Option<native::GraphicsCommandList>,
353 free_lists: Vec<native::GraphicsCommandList>,
354 pass: PassState,
355 temp: Temp,
356 }
357
358 unsafe impl Send for CommandEncoder {}
359 unsafe impl Sync for CommandEncoder {}
360
361 pub struct CommandBuffer {
362 raw: native::GraphicsCommandList,
363 }
364
365 unsafe impl Send for CommandBuffer {}
366 unsafe impl Sync for CommandBuffer {}
367
368 #[derive(Debug)]
369 pub struct Buffer {
370 resource: native::Resource,
371 size: wgt::BufferAddress,
372 }
373
374 unsafe impl Send for Buffer {}
375 unsafe impl Sync for Buffer {}
376
377 impl crate::BufferBinding<'_, Api> {
resolve_size(&self) -> wgt::BufferAddress378 fn resolve_size(&self) -> wgt::BufferAddress {
379 match self.size {
380 Some(size) => size.get(),
381 None => self.buffer.size - self.offset,
382 }
383 }
384
resolve_address(&self) -> wgt::BufferAddress385 fn resolve_address(&self) -> wgt::BufferAddress {
386 self.buffer.resource.gpu_virtual_address() + self.offset
387 }
388 }
389
390 #[derive(Debug)]
391 pub struct Texture {
392 resource: native::Resource,
393 format: wgt::TextureFormat,
394 dimension: wgt::TextureDimension,
395 size: wgt::Extent3d,
396 mip_level_count: u32,
397 sample_count: u32,
398 }
399
400 unsafe impl Send for Texture {}
401 unsafe impl Sync for Texture {}
402
403 impl Texture {
array_layer_count(&self) -> u32404 fn array_layer_count(&self) -> u32 {
405 match self.dimension {
406 wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => {
407 self.size.depth_or_array_layers
408 }
409 wgt::TextureDimension::D3 => 1,
410 }
411 }
412
calc_subresource(&self, mip_level: u32, array_layer: u32, plane: u32) -> u32413 fn calc_subresource(&self, mip_level: u32, array_layer: u32, plane: u32) -> u32 {
414 mip_level + (array_layer + plane * self.array_layer_count()) * self.mip_level_count
415 }
416
calc_subresource_for_copy(&self, base: &crate::TextureCopyBase) -> u32417 fn calc_subresource_for_copy(&self, base: &crate::TextureCopyBase) -> u32 {
418 self.calc_subresource(base.mip_level, base.array_layer, 0)
419 }
420 }
421
422 #[derive(Debug)]
423 pub struct TextureView {
424 raw_format: native::Format,
425 format_aspects: crate::FormatAspects, // May explicitly ignore stencil aspect of raw_format!
426 target_base: (native::Resource, u32),
427 handle_srv: Option<descriptor::Handle>,
428 handle_uav: Option<descriptor::Handle>,
429 handle_rtv: Option<descriptor::Handle>,
430 handle_dsv_ro: Option<descriptor::Handle>,
431 handle_dsv_rw: Option<descriptor::Handle>,
432 }
433
434 unsafe impl Send for TextureView {}
435 unsafe impl Sync for TextureView {}
436
437 #[derive(Debug)]
438 pub struct Sampler {
439 handle: descriptor::Handle,
440 }
441
442 unsafe impl Send for Sampler {}
443 unsafe impl Sync for Sampler {}
444
445 #[derive(Debug)]
446 pub struct QuerySet {
447 raw: native::QueryHeap,
448 raw_ty: d3d12::D3D12_QUERY_TYPE,
449 }
450
451 unsafe impl Send for QuerySet {}
452 unsafe impl Sync for QuerySet {}
453
454 #[derive(Debug)]
455 pub struct Fence {
456 raw: native::Fence,
457 }
458
459 unsafe impl Send for Fence {}
460 unsafe impl Sync for Fence {}
461
462 pub struct BindGroupLayout {
463 /// Sorted list of entries.
464 entries: Vec<wgt::BindGroupLayoutEntry>,
465 cpu_heap_views: Option<descriptor::CpuHeap>,
466 cpu_heap_samplers: Option<descriptor::CpuHeap>,
467 copy_counts: Vec<u32>, // all 1's
468 }
469
470 #[derive(Clone, Copy)]
471 enum BufferViewKind {
472 Constant,
473 ShaderResource,
474 UnorderedAccess,
475 }
476
477 #[derive(Debug)]
478 pub struct BindGroup {
479 handle_views: Option<descriptor::DualHandle>,
480 handle_samplers: Option<descriptor::DualHandle>,
481 dynamic_buffers: Vec<native::GpuAddress>,
482 }
483
484 bitflags::bitflags! {
485 struct TableTypes: u8 {
486 const SRV_CBV_UAV = 1 << 0;
487 const SAMPLERS = 1 << 1;
488 }
489 }
490
491 // Element (also known as parameter) index into the root signature.
492 type RootIndex = u32;
493
494 struct BindGroupInfo {
495 base_root_index: RootIndex,
496 tables: TableTypes,
497 dynamic_buffers: Vec<BufferViewKind>,
498 }
499
500 #[derive(Clone)]
501 struct PipelineLayoutShared {
502 signature: native::RootSignature,
503 total_root_elements: RootIndex,
504 special_constants_root_index: Option<RootIndex>,
505 }
506
507 unsafe impl Send for PipelineLayoutShared {}
508 unsafe impl Sync for PipelineLayoutShared {}
509
510 pub struct PipelineLayout {
511 shared: PipelineLayoutShared,
512 // Storing for each associated bind group, which tables we created
513 // in the root signature. This is required for binding descriptor sets.
514 bind_group_infos: ArrayVec<BindGroupInfo, { crate::MAX_BIND_GROUPS }>,
515 naga_options: naga::back::hlsl::Options,
516 }
517
518 #[derive(Debug)]
519 pub struct ShaderModule {
520 naga: crate::NagaShader,
521 raw_name: Option<ffi::CString>,
522 }
523
524 pub struct RenderPipeline {
525 raw: native::PipelineState,
526 layout: PipelineLayoutShared,
527 topology: d3d12::D3D12_PRIMITIVE_TOPOLOGY,
528 vertex_strides: [Option<NonZeroU32>; crate::MAX_VERTEX_BUFFERS],
529 }
530
531 unsafe impl Send for RenderPipeline {}
532 unsafe impl Sync for RenderPipeline {}
533
534 pub struct ComputePipeline {
535 raw: native::PipelineState,
536 layout: PipelineLayoutShared,
537 }
538
539 unsafe impl Send for ComputePipeline {}
540 unsafe impl Sync for ComputePipeline {}
541
542 impl SwapChain {
release_resources(self) -> native::WeakPtr<dxgi1_4::IDXGISwapChain3>543 unsafe fn release_resources(self) -> native::WeakPtr<dxgi1_4::IDXGISwapChain3> {
544 for resource in self.resources {
545 resource.destroy();
546 }
547 self.raw
548 }
549
wait(&mut self, timeout_ms: u32) -> Result<bool, crate::SurfaceError>550 unsafe fn wait(&mut self, timeout_ms: u32) -> Result<bool, crate::SurfaceError> {
551 match synchapi::WaitForSingleObject(self.waitable, timeout_ms) {
552 winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::SurfaceError::Lost),
553 winbase::WAIT_OBJECT_0 => Ok(true),
554 winerror::WAIT_TIMEOUT => Ok(false),
555 other => {
556 log::error!("Unexpected wait status: 0x{:x}", other);
557 Err(crate::SurfaceError::Lost)
558 }
559 }
560 }
561 }
562
563 impl crate::Surface<Api> for Surface {
configure( &mut self, device: &Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError>564 unsafe fn configure(
565 &mut self,
566 device: &Device,
567 config: &crate::SurfaceConfiguration,
568 ) -> Result<(), crate::SurfaceError> {
569 let mut flags = dxgi::DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT;
570 match config.present_mode {
571 wgt::PresentMode::Immediate => {
572 flags |= dxgi::DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
573 }
574 _ => {}
575 }
576
577 let non_srgb_format = conv::map_texture_format_nosrgb(config.format);
578
579 let swap_chain = match self.swap_chain.take() {
580 //Note: this path doesn't properly re-initialize all of the things
581 Some(sc) => {
582 // can't have image resources in flight used by GPU
583 let _ = device.wait_idle();
584
585 let raw = sc.release_resources();
586 let result = raw.ResizeBuffers(
587 config.swap_chain_size,
588 config.extent.width,
589 config.extent.height,
590 non_srgb_format,
591 flags,
592 );
593 if let Err(err) = result.into_result() {
594 log::error!("ResizeBuffers failed: {}", err);
595 return Err(crate::SurfaceError::Other("window is in use"));
596 }
597 raw
598 }
599 None => {
600 let mut swap_chain1 = native::WeakPtr::<dxgi1_2::IDXGISwapChain1>::null();
601
602 let raw_desc = dxgi1_2::DXGI_SWAP_CHAIN_DESC1 {
603 AlphaMode: conv::map_acomposite_alpha_mode(config.composite_alpha_mode),
604 BufferCount: config.swap_chain_size,
605 Width: config.extent.width,
606 Height: config.extent.height,
607 Format: non_srgb_format,
608 Flags: flags,
609 BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT,
610 SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
611 Count: 1,
612 Quality: 0,
613 },
614 Scaling: dxgi1_2::DXGI_SCALING_STRETCH,
615 Stereo: 0,
616 SwapEffect: dxgi::DXGI_SWAP_EFFECT_FLIP_DISCARD,
617 };
618
619 let hr = {
620 profiling::scope!("IDXGIFactory4::CreateSwapChainForHwnd");
621 self.factory.CreateSwapChainForHwnd(
622 device.present_queue.as_mut_ptr() as *mut _,
623 self.wnd_handle,
624 &raw_desc,
625 ptr::null(),
626 ptr::null_mut(),
627 swap_chain1.mut_void() as *mut *mut _,
628 )
629 };
630
631 if let Err(err) = hr.into_result() {
632 log::error!("SwapChain creation error: {}", err);
633 return Err(crate::SurfaceError::Other("swap chain creation"));
634 }
635
636 match swap_chain1.cast::<dxgi1_4::IDXGISwapChain3>().into_result() {
637 Ok(swap_chain3) => {
638 swap_chain1.destroy();
639 swap_chain3
640 }
641 Err(err) => {
642 log::error!("Unable to cast swap chain: {}", err);
643 return Err(crate::SurfaceError::Other("swap chain cast to 3"));
644 }
645 }
646 }
647 };
648
649 // Disable automatic Alt+Enter handling by DXGI.
650 const DXGI_MWA_NO_WINDOW_CHANGES: u32 = 1;
651 const DXGI_MWA_NO_ALT_ENTER: u32 = 2;
652 self.factory.MakeWindowAssociation(
653 self.wnd_handle,
654 DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER,
655 );
656
657 swap_chain.SetMaximumFrameLatency(config.swap_chain_size);
658 let waitable = swap_chain.GetFrameLatencyWaitableObject();
659
660 let mut resources = vec![native::Resource::null(); config.swap_chain_size as usize];
661 for (i, res) in resources.iter_mut().enumerate() {
662 swap_chain.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void());
663 }
664
665 self.swap_chain = Some(SwapChain {
666 raw: swap_chain,
667 resources,
668 waitable,
669 acquired_count: 0,
670 present_mode: config.present_mode,
671 format: config.format,
672 size: config.extent,
673 });
674
675 Ok(())
676 }
677
unconfigure(&mut self, device: &Device)678 unsafe fn unconfigure(&mut self, device: &Device) {
679 if let Some(mut sc) = self.swap_chain.take() {
680 let _ = sc.wait(winbase::INFINITE);
681 //TODO: this shouldn't be needed,
682 // but it complains that the queue is still used otherwise
683 let _ = device.wait_idle();
684 let raw = sc.release_resources();
685 raw.destroy();
686 }
687 }
688
acquire_texture( &mut self, timeout_ms: u32, ) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError>689 unsafe fn acquire_texture(
690 &mut self,
691 timeout_ms: u32,
692 ) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
693 let sc = self.swap_chain.as_mut().unwrap();
694
695 sc.wait(timeout_ms)?;
696
697 let base_index = sc.raw.GetCurrentBackBufferIndex() as usize;
698 let index = (base_index + sc.acquired_count) % sc.resources.len();
699 sc.acquired_count += 1;
700
701 let texture = Texture {
702 resource: sc.resources[index],
703 format: sc.format,
704 dimension: wgt::TextureDimension::D2,
705 size: sc.size,
706 mip_level_count: 1,
707 sample_count: 1,
708 };
709 Ok(Some(crate::AcquiredSurfaceTexture {
710 texture,
711 suboptimal: false,
712 }))
713 }
discard_texture(&mut self, _texture: Texture)714 unsafe fn discard_texture(&mut self, _texture: Texture) {
715 let sc = self.swap_chain.as_mut().unwrap();
716 sc.acquired_count -= 1;
717 }
718 }
719
720 impl crate::Queue<Api> for Queue {
submit( &mut self, command_buffers: &[&CommandBuffer], signal_fence: Option<(&mut Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError>721 unsafe fn submit(
722 &mut self,
723 command_buffers: &[&CommandBuffer],
724 signal_fence: Option<(&mut Fence, crate::FenceValue)>,
725 ) -> Result<(), crate::DeviceError> {
726 self.temp_lists.clear();
727 for cmd_buf in command_buffers {
728 self.temp_lists.push(cmd_buf.raw.as_list());
729 }
730
731 {
732 profiling::scope!("ID3D12CommandQueue::ExecuteCommandLists");
733 self.raw.execute_command_lists(&self.temp_lists);
734 }
735
736 if let Some((fence, value)) = signal_fence {
737 self.raw
738 .signal(fence.raw, value)
739 .into_device_result("Signal fence")?;
740 }
741 Ok(())
742 }
present( &mut self, surface: &mut Surface, _texture: Texture, ) -> Result<(), crate::SurfaceError>743 unsafe fn present(
744 &mut self,
745 surface: &mut Surface,
746 _texture: Texture,
747 ) -> Result<(), crate::SurfaceError> {
748 let sc = surface.swap_chain.as_mut().unwrap();
749 sc.acquired_count -= 1;
750
751 let (interval, flags) = match sc.present_mode {
752 wgt::PresentMode::Immediate => (0, dxgi::DXGI_PRESENT_ALLOW_TEARING),
753 wgt::PresentMode::Fifo => (1, 0),
754 wgt::PresentMode::Mailbox => (1, 0),
755 };
756
757 profiling::scope!("IDXGISwapchain3::Present");
758 sc.raw.Present(interval, flags);
759
760 Ok(())
761 }
762
get_timestamp_period(&self) -> f32763 unsafe fn get_timestamp_period(&self) -> f32 {
764 let mut frequency = 0u64;
765 self.raw.GetTimestampFrequency(&mut frequency);
766 (1_000_000_000.0 / frequency as f64) as f32
767 }
768 }
769