1 //#[deny(missing_docs)]
2 
3 extern crate gfx_hal as hal;
4 extern crate auxil;
5 extern crate range_alloc;
6 #[macro_use]
7 extern crate bitflags;
8 extern crate libloading;
9 #[macro_use]
10 extern crate log;
11 extern crate parking_lot;
12 extern crate smallvec;
13 extern crate spirv_cross;
14 #[macro_use]
15 extern crate winapi;
16 extern crate wio;
17 
18 use hal::{
19     adapter,
20     buffer,
21     command,
22     format,
23     image,
24     memory,
25     pass,
26     pso,
27     query,
28     queue,
29     range::RangeArg,
30     window,
31     DrawCount,
32     IndexCount,
33     InstanceCount,
34     Limits,
35     VertexCount,
36     VertexOffset,
37     WorkGroupCount,
38 };
39 
40 use range_alloc::RangeAllocator;
41 
42 use winapi::shared::dxgi::{IDXGIAdapter, IDXGIFactory, IDXGISwapChain};
43 use winapi::shared::minwindef::{FALSE, UINT, HMODULE};
44 use winapi::shared::windef::{HWND, RECT};
45 use winapi::shared::{dxgiformat, winerror};
46 use winapi::um::winuser::GetClientRect;
47 use winapi::um::{d3d11, d3dcommon};
48 use winapi::Interface as _;
49 
50 use wio::com::ComPtr;
51 
52 use parking_lot::{Condvar, Mutex};
53 
54 use std::borrow::Borrow;
55 use std::cell::RefCell;
56 use std::fmt;
57 use std::mem;
58 use std::ops::Range;
59 use std::ptr;
60 use std::sync::Arc;
61 
62 use std::os::raw::c_void;
63 
64 macro_rules! debug_scope {
65     ($context:expr, $($arg:tt)+) => ({
66         #[cfg(debug_assertions)]
67         {
68             $crate::debug::DebugScope::with_name(
69                 $context,
70                 format_args!($($arg)+),
71             )
72         }
73         #[cfg(not(debug_assertions))]
74         {
75             ()
76         }
77     });
78 }
79 
80 macro_rules! debug_marker {
81     ($context:expr, $($arg:tt)+) => ({
82         #[cfg(debug_assertions)]
83         {
84             $crate::debug::debug_marker(
85                 $context,
86                 format_args!($($arg)+),
87             );
88         }
89     });
90 }
91 
92 mod conv;
93 #[cfg(debug_assertions)]
94 mod debug;
95 mod device;
96 mod dxgi;
97 mod internal;
98 mod shader;
99 
100 #[derive(Clone)]
101 pub(crate) struct ViewInfo {
102     resource: *mut d3d11::ID3D11Resource,
103     kind: image::Kind,
104     caps: image::ViewCapabilities,
105     view_kind: image::ViewKind,
106     format: dxgiformat::DXGI_FORMAT,
107     range: image::SubresourceRange,
108 }
109 
110 impl fmt::Debug for ViewInfo {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result111     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
112         fmt.write_str("ViewInfo")
113     }
114 }
115 
116 #[derive(Debug)]
117 pub struct Instance {
118     pub(crate) factory: ComPtr<IDXGIFactory>,
119     pub(crate) dxgi_version: dxgi::DxgiVersion,
120     library: libloading::Library,
121 }
122 
123 unsafe impl Send for Instance {}
124 unsafe impl Sync for Instance {}
125 
126 impl Instance {
create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface127     pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
128         Surface {
129             factory: self.factory.clone(),
130             wnd_handle: hwnd as *mut _,
131             presentation: None,
132         }
133     }
134 }
135 
get_features( _device: ComPtr<d3d11::ID3D11Device>, _feature_level: d3dcommon::D3D_FEATURE_LEVEL, ) -> hal::Features136 fn get_features(
137     _device: ComPtr<d3d11::ID3D11Device>,
138     _feature_level: d3dcommon::D3D_FEATURE_LEVEL,
139 ) -> hal::Features {
140     hal::Features::ROBUST_BUFFER_ACCESS
141         | hal::Features::FULL_DRAW_INDEX_U32
142         | hal::Features::FORMAT_BC
143         | hal::Features::INSTANCE_RATE
144         | hal::Features::SAMPLER_MIP_LOD_BIAS
145 }
146 
get_format_properties( device: ComPtr<d3d11::ID3D11Device>, ) -> [format::Properties; format::NUM_FORMATS]147 fn get_format_properties(
148     device: ComPtr<d3d11::ID3D11Device>,
149 ) -> [format::Properties; format::NUM_FORMATS] {
150     let mut format_properties = [format::Properties::default(); format::NUM_FORMATS];
151     for (i, props) in &mut format_properties.iter_mut().enumerate().skip(1) {
152         let format: format::Format = unsafe { mem::transmute(i as u32) };
153 
154         let dxgi_format = match conv::map_format(format) {
155             Some(format) => format,
156             None => continue,
157         };
158 
159         let mut support = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT {
160             InFormat: dxgi_format,
161             OutFormatSupport: 0,
162         };
163         let mut support_2 = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2 {
164             InFormat: dxgi_format,
165             OutFormatSupport2: 0,
166         };
167 
168         let hr = unsafe {
169             device.CheckFeatureSupport(
170                 d3d11::D3D11_FEATURE_FORMAT_SUPPORT,
171                 &mut support as *mut _ as *mut _,
172                 mem::size_of::<d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT>() as UINT,
173             )
174         };
175 
176         if hr == winerror::S_OK {
177             let can_buffer = 0 != support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BUFFER;
178             let can_image = 0
179                 != support.OutFormatSupport
180                     & (d3d11::D3D11_FORMAT_SUPPORT_TEXTURE1D
181                         | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE2D
182                         | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE3D
183                         | d3d11::D3D11_FORMAT_SUPPORT_TEXTURECUBE);
184             let can_linear = can_image && !format.surface_desc().is_compressed();
185             if can_image {
186                 props.optimal_tiling |=
187                     format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC;
188             }
189             if can_linear {
190                 props.linear_tiling |=
191                     format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC;
192             }
193             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER != 0 {
194                 props.buffer_features |= format::BufferFeature::VERTEX;
195             }
196             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_SAMPLE != 0 {
197                 props.optimal_tiling |= format::ImageFeature::SAMPLED_LINEAR;
198             }
199             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_RENDER_TARGET != 0 {
200                 props.optimal_tiling |=
201                     format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST;
202                 if can_linear {
203                     props.linear_tiling |=
204                         format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST;
205                 }
206             }
207             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BLENDABLE != 0 {
208                 props.optimal_tiling |= format::ImageFeature::COLOR_ATTACHMENT_BLEND;
209             }
210             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_DEPTH_STENCIL != 0 {
211                 props.optimal_tiling |= format::ImageFeature::DEPTH_STENCIL_ATTACHMENT;
212             }
213             if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_LOAD != 0 {
214                 //TODO: check d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD ?
215                 if can_buffer {
216                     props.buffer_features |= format::BufferFeature::UNIFORM_TEXEL;
217                 }
218             }
219 
220             let hr = unsafe {
221                 device.CheckFeatureSupport(
222                     d3d11::D3D11_FEATURE_FORMAT_SUPPORT2,
223                     &mut support_2 as *mut _ as *mut _,
224                     mem::size_of::<d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2>() as UINT,
225                 )
226             };
227             if hr == winerror::S_OK {
228                 if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 {
229                     //TODO: other atomic flags?
230                     if can_buffer {
231                         props.buffer_features |= format::BufferFeature::STORAGE_TEXEL_ATOMIC;
232                     }
233                     if can_image {
234                         props.optimal_tiling |= format::ImageFeature::STORAGE_ATOMIC;
235                     }
236                 }
237                 if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 {
238                     if can_buffer {
239                         props.buffer_features |= format::BufferFeature::STORAGE_TEXEL;
240                     }
241                     if can_image {
242                         props.optimal_tiling |= format::ImageFeature::STORAGE;
243                     }
244                 }
245             }
246         }
247 
248         //TODO: blits, linear tiling
249     }
250 
251     format_properties
252 }
253 
254 impl hal::Instance<Backend> for Instance {
create(_: &str, _: u32) -> Result<Self, hal::UnsupportedBackend>255     fn create(_: &str, _: u32) -> Result<Self, hal::UnsupportedBackend> {
256         // TODO: get the latest factory we can find
257 
258         match dxgi::get_dxgi_factory() {
259             Ok((factory, dxgi_version)) => {
260                 info!("DXGI version: {:?}", dxgi_version);
261                 let library = libloading::Library::new("d3d11.dll")
262                     .map_err(|_| hal::UnsupportedBackend)?;
263                 Ok(Instance {
264                     factory,
265                     dxgi_version,
266                     library,
267                 })
268             }
269             Err(hr) => {
270                 info!("Failed on factory creation: {:?}", hr);
271                 Err(hal::UnsupportedBackend)
272             }
273         }
274     }
275 
enumerate_adapters(&self) -> Vec<adapter::Adapter<Backend>>276     fn enumerate_adapters(&self) -> Vec<adapter::Adapter<Backend>> {
277         type Fun = extern "system" fn(
278             *mut IDXGIAdapter,
279             UINT,
280             HMODULE,
281             UINT,
282             *const UINT,
283             UINT,
284             UINT,
285             *mut *mut d3d11::ID3D11Device,
286             *mut UINT,
287             *mut *mut d3d11::ID3D11DeviceContext,
288         ) -> winerror::HRESULT;
289 
290         let mut adapters = Vec::new();
291         let mut idx = 0;
292 
293         let func: libloading::Symbol<Fun> = match unsafe {
294             self.library.get(b"D3D11CreateDevice")
295         } {
296             Ok(func) => func,
297             Err(e) => {
298                 error!("Unable to get device creation function: {:?}", e);
299                 return Vec::new();
300             }
301         };
302 
303         while let Ok((adapter, info)) =
304             dxgi::get_adapter(idx, self.factory.as_raw(), self.dxgi_version)
305         {
306             idx += 1;
307 
308             use hal::memory::Properties;
309 
310             // TODO: move into function?
311             let (device, feature_level) = {
312                 let feature_level = get_feature_level(adapter.as_raw());
313 
314                 let mut device = ptr::null_mut();
315                 let hr = func(
316                     adapter.as_raw() as *mut _,
317                     d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
318                     ptr::null_mut(),
319                     0,
320                     [feature_level].as_ptr(),
321                     1,
322                     d3d11::D3D11_SDK_VERSION,
323                     &mut device as *mut *mut _ as *mut *mut _,
324                     ptr::null_mut(),
325                     ptr::null_mut(),
326                 );
327 
328                 if !winerror::SUCCEEDED(hr) {
329                     continue;
330                 }
331 
332                 (
333                     unsafe { ComPtr::<d3d11::ID3D11Device>::from_raw(device) },
334                     feature_level,
335                 )
336             };
337 
338             let memory_properties = adapter::MemoryProperties {
339                 memory_types: vec![
340                     adapter::MemoryType {
341                         properties: Properties::DEVICE_LOCAL,
342                         heap_index: 0,
343                     },
344                     adapter::MemoryType {
345                         properties: Properties::CPU_VISIBLE
346                             | Properties::COHERENT
347                             | Properties::CPU_CACHED,
348                         heap_index: 1,
349                     },
350                     adapter::MemoryType {
351                         properties: Properties::CPU_VISIBLE | Properties::CPU_CACHED,
352                         heap_index: 1,
353                     },
354                 ],
355                 // TODO: would using *VideoMemory and *SystemMemory from
356                 //       DXGI_ADAPTER_DESC be too optimistic? :)
357                 memory_heaps: vec![!0, !0],
358             };
359 
360             let limits = hal::Limits {
361                 max_image_1d_size: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION as _,
362                 max_image_2d_size: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _,
363                 max_image_3d_size: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _,
364                 max_image_cube_size: d3d11::D3D11_REQ_TEXTURECUBE_DIMENSION as _,
365                 max_image_array_layers: d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _,
366                 max_texel_elements: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, //TODO
367                 max_patch_size: 0,                                                    // TODO
368                 max_viewports: d3d11::D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _,
369                 max_viewport_dimensions: [d3d11::D3D11_VIEWPORT_BOUNDS_MAX; 2],
370                 max_framebuffer_extent: hal::image::Extent {
371                     //TODO
372                     width: 4096,
373                     height: 4096,
374                     depth: 1,
375                 },
376                 max_compute_work_group_count: [
377                     d3d11::D3D11_CS_THREAD_GROUP_MAX_X,
378                     d3d11::D3D11_CS_THREAD_GROUP_MAX_Y,
379                     d3d11::D3D11_CS_THREAD_GROUP_MAX_Z,
380                 ],
381                 max_compute_work_group_size: [
382                     d3d11::D3D11_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP,
383                     1,
384                     1,
385                 ], // TODO
386                 max_vertex_input_attribute_offset: 255, // TODO
387                 max_vertex_input_attributes: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _,
388                 max_vertex_input_binding_stride:
389                     d3d11::D3D11_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _,
390                 max_vertex_input_bindings: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, // TODO: verify same as attributes
391                 max_vertex_output_components: d3d11::D3D11_VS_OUTPUT_REGISTER_COUNT as _, // TODO
392                 min_texel_buffer_offset_alignment: 1,                                     // TODO
393                 min_uniform_buffer_offset_alignment: 16, // TODO: verify
394                 min_storage_buffer_offset_alignment: 1,  // TODO
395                 framebuffer_color_sample_counts: 1,      // TODO
396                 framebuffer_depth_sample_counts: 1,      // TODO
397                 framebuffer_stencil_sample_counts: 1,    // TODO
398                 max_color_attachments: d3d11::D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT as _,
399                 buffer_image_granularity: 1,
400                 non_coherent_atom_size: 1, // TODO
401                 max_sampler_anisotropy: 16.,
402                 optimal_buffer_copy_offset_alignment: 1, // TODO
403                 optimal_buffer_copy_pitch_alignment: 1,  // TODO
404                 min_vertex_input_binding_stride_alignment: 1,
405                 ..hal::Limits::default() //TODO
406             };
407 
408             let features = get_features(device.clone(), feature_level);
409             let format_properties = get_format_properties(device.clone());
410 
411             let physical_device = PhysicalDevice {
412                 adapter,
413                 features,
414                 limits,
415                 memory_properties,
416                 format_properties,
417             };
418 
419             info!("{:#?}", info);
420 
421             adapters.push(adapter::Adapter {
422                 info,
423                 physical_device,
424                 queue_families: vec![QueueFamily],
425             });
426         }
427 
428         adapters
429     }
430 
create_surface( &self, has_handle: &impl raw_window_handle::HasRawWindowHandle, ) -> Result<Surface, hal::window::InitError>431     unsafe fn create_surface(
432         &self,
433         has_handle: &impl raw_window_handle::HasRawWindowHandle,
434     ) -> Result<Surface, hal::window::InitError> {
435         match has_handle.raw_window_handle() {
436             raw_window_handle::RawWindowHandle::Windows(handle) => {
437                 Ok(self.create_surface_from_hwnd(handle.hwnd))
438             }
439             _ => Err(hal::window::InitError::UnsupportedWindowHandle),
440         }
441     }
442 
destroy_surface(&self, _surface: Surface)443     unsafe fn destroy_surface(&self, _surface: Surface) {
444         // TODO: Implement Surface cleanup
445     }
446 }
447 
448 pub struct PhysicalDevice {
449     adapter: ComPtr<IDXGIAdapter>,
450     features: hal::Features,
451     limits: hal::Limits,
452     memory_properties: adapter::MemoryProperties,
453     format_properties: [format::Properties; format::NUM_FORMATS],
454 }
455 
456 impl fmt::Debug for PhysicalDevice {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result457     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
458         fmt.write_str("PhysicalDevice")
459     }
460 }
461 
462 unsafe impl Send for PhysicalDevice {}
463 unsafe impl Sync for PhysicalDevice {}
464 
465 // TODO: does the adapter we get earlier matter for feature level?
get_feature_level(adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL466 fn get_feature_level(adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL {
467     let requested_feature_levels = [
468         d3dcommon::D3D_FEATURE_LEVEL_11_1,
469         d3dcommon::D3D_FEATURE_LEVEL_11_0,
470         d3dcommon::D3D_FEATURE_LEVEL_10_1,
471         d3dcommon::D3D_FEATURE_LEVEL_10_0,
472         d3dcommon::D3D_FEATURE_LEVEL_9_3,
473         d3dcommon::D3D_FEATURE_LEVEL_9_2,
474         d3dcommon::D3D_FEATURE_LEVEL_9_1,
475     ];
476 
477     let mut feature_level = d3dcommon::D3D_FEATURE_LEVEL_9_1;
478     let hr = unsafe {
479         d3d11::D3D11CreateDevice(
480             adapter,
481             d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
482             ptr::null_mut(),
483             0,
484             requested_feature_levels[..].as_ptr(),
485             requested_feature_levels.len() as _,
486             d3d11::D3D11_SDK_VERSION,
487             ptr::null_mut(),
488             &mut feature_level as *mut _,
489             ptr::null_mut(),
490         )
491     };
492 
493     if !winerror::SUCCEEDED(hr) {
494         // if there is no 11.1 runtime installed, requesting
495         // `D3D_FEATURE_LEVEL_11_1` will return E_INVALIDARG so we just retry
496         // without that
497         if hr == winerror::E_INVALIDARG {
498             let hr = unsafe {
499                 d3d11::D3D11CreateDevice(
500                     adapter,
501                     d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
502                     ptr::null_mut(),
503                     0,
504                     requested_feature_levels[1 ..].as_ptr(),
505                     (requested_feature_levels.len() - 1) as _,
506                     d3d11::D3D11_SDK_VERSION,
507                     ptr::null_mut(),
508                     &mut feature_level as *mut _,
509                     ptr::null_mut(),
510                 )
511             };
512 
513             if !winerror::SUCCEEDED(hr) {
514                 // TODO: device might not support any feature levels?
515                 unimplemented!();
516             }
517         }
518     }
519 
520     feature_level
521 }
522 
523 // TODO: PhysicalDevice
524 impl adapter::PhysicalDevice<Backend> for PhysicalDevice {
open( &self, families: &[(&QueueFamily, &[queue::QueuePriority])], requested_features: hal::Features, ) -> Result<adapter::Gpu<Backend>, hal::device::CreationError>525     unsafe fn open(
526         &self,
527         families: &[(&QueueFamily, &[queue::QueuePriority])],
528         requested_features: hal::Features,
529     ) -> Result<adapter::Gpu<Backend>, hal::device::CreationError> {
530         let (device, cxt) = {
531             if !self.features().contains(requested_features) {
532                 return Err(hal::device::CreationError::MissingFeature);
533             }
534 
535             let feature_level = get_feature_level(self.adapter.as_raw());
536             let mut returned_level = d3dcommon::D3D_FEATURE_LEVEL_9_1;
537 
538             #[cfg(debug_assertions)]
539             let create_flags = d3d11::D3D11_CREATE_DEVICE_DEBUG;
540             #[cfg(not(debug_assertions))]
541             let create_flags = 0;
542 
543             // TODO: request debug device only on debug config?
544             let mut device = ptr::null_mut();
545             let mut cxt = ptr::null_mut();
546             let hr = d3d11::D3D11CreateDevice(
547                 self.adapter.as_raw() as *mut _,
548                 d3dcommon::D3D_DRIVER_TYPE_UNKNOWN,
549                 ptr::null_mut(),
550                 create_flags,
551                 [feature_level].as_ptr(),
552                 1,
553                 d3d11::D3D11_SDK_VERSION,
554                 &mut device as *mut *mut _ as *mut *mut _,
555                 &mut returned_level as *mut _,
556                 &mut cxt as *mut *mut _ as *mut *mut _,
557             );
558 
559             // NOTE: returns error if adapter argument is non-null and driver
560             // type is not unknown; or if debug device is requested but not
561             // present
562             if !winerror::SUCCEEDED(hr) {
563                 return Err(hal::device::CreationError::InitializationFailed);
564             }
565 
566             info!("feature level={:x}", feature_level);
567 
568             (ComPtr::from_raw(device), ComPtr::from_raw(cxt))
569         };
570 
571         let device = device::Device::new(device, cxt, self.memory_properties.clone());
572 
573         // TODO: deferred context => 1 cxt/queue?
574         let queue_groups = families
575             .into_iter()
576             .map(|&(_family, prio)| {
577                 assert_eq!(prio.len(), 1);
578                 let mut group = queue::QueueGroup::new(queue::QueueFamilyId(0));
579 
580                 // TODO: multiple queues?
581                 let queue = CommandQueue {
582                     context: device.context.clone(),
583                 };
584                 group.add_queue(queue);
585                 group
586             })
587             .collect();
588 
589         Ok(adapter::Gpu {
590             device,
591             queue_groups,
592         })
593     }
594 
format_properties(&self, fmt: Option<format::Format>) -> format::Properties595     fn format_properties(&self, fmt: Option<format::Format>) -> format::Properties {
596         let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0);
597         self.format_properties[idx]
598     }
599 
image_format_properties( &self, format: format::Format, dimensions: u8, tiling: image::Tiling, usage: image::Usage, view_caps: image::ViewCapabilities, ) -> Option<image::FormatProperties>600     fn image_format_properties(
601         &self,
602         format: format::Format,
603         dimensions: u8,
604         tiling: image::Tiling,
605         usage: image::Usage,
606         view_caps: image::ViewCapabilities,
607     ) -> Option<image::FormatProperties> {
608         conv::map_format(format)?; //filter out unknown formats
609 
610         let supported_usage = {
611             use hal::image::Usage as U;
612             let format_props = &self.format_properties[format as usize];
613             let props = match tiling {
614                 image::Tiling::Optimal => format_props.optimal_tiling,
615                 image::Tiling::Linear => format_props.linear_tiling,
616             };
617             let mut flags = U::empty();
618             // Note: these checks would have been nicer if we had explicit BLIT usage
619             if props.contains(format::ImageFeature::BLIT_SRC) {
620                 flags |= U::TRANSFER_SRC;
621             }
622             if props.contains(format::ImageFeature::BLIT_DST) {
623                 flags |= U::TRANSFER_DST;
624             }
625             if props.contains(format::ImageFeature::SAMPLED) {
626                 flags |= U::SAMPLED;
627             }
628             if props.contains(format::ImageFeature::STORAGE) {
629                 flags |= U::STORAGE;
630             }
631             if props.contains(format::ImageFeature::COLOR_ATTACHMENT) {
632                 flags |= U::COLOR_ATTACHMENT;
633             }
634             if props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) {
635                 flags |= U::DEPTH_STENCIL_ATTACHMENT;
636             }
637             flags
638         };
639         if !supported_usage.contains(usage) {
640             return None;
641         }
642 
643         let max_resource_size =
644             (d3d11::D3D11_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20;
645         Some(match tiling {
646             image::Tiling::Optimal => image::FormatProperties {
647                 max_extent: match dimensions {
648                     1 => image::Extent {
649                         width: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION,
650                         height: 1,
651                         depth: 1,
652                     },
653                     2 => image::Extent {
654                         width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
655                         height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
656                         depth: 1,
657                     },
658                     3 => image::Extent {
659                         width: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
660                         height: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
661                         depth: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION,
662                     },
663                     _ => return None,
664                 },
665                 max_levels: d3d11::D3D11_REQ_MIP_LEVELS as _,
666                 max_layers: match dimensions {
667                     1 => d3d11::D3D11_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _,
668                     2 => d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _,
669                     _ => return None,
670                 },
671                 sample_count_mask: if dimensions == 2
672                     && !view_caps.contains(image::ViewCapabilities::KIND_CUBE)
673                     && (usage.contains(image::Usage::COLOR_ATTACHMENT)
674                         | usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT))
675                 {
676                     0x3F //TODO: use D3D12_FEATURE_DATA_FORMAT_SUPPORT
677                 } else {
678                     0x1
679                 },
680                 max_resource_size,
681             },
682             image::Tiling::Linear => image::FormatProperties {
683                 max_extent: match dimensions {
684                     2 => image::Extent {
685                         width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
686                         height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION,
687                         depth: 1,
688                     },
689                     _ => return None,
690                 },
691                 max_levels: 1,
692                 max_layers: 1,
693                 sample_count_mask: 0x1,
694                 max_resource_size,
695             },
696         })
697     }
698 
memory_properties(&self) -> adapter::MemoryProperties699     fn memory_properties(&self) -> adapter::MemoryProperties {
700         self.memory_properties.clone()
701     }
702 
features(&self) -> hal::Features703     fn features(&self) -> hal::Features {
704         self.features
705     }
706 
limits(&self) -> Limits707     fn limits(&self) -> Limits {
708         self.limits
709     }
710 }
711 
712 struct Presentation {
713     swapchain: ComPtr<IDXGISwapChain>,
714     view: ComPtr<d3d11::ID3D11RenderTargetView>,
715     format: format::Format,
716     size: window::Extent2D,
717 }
718 
719 pub struct Surface {
720     pub(crate) factory: ComPtr<IDXGIFactory>,
721     wnd_handle: HWND,
722     presentation: Option<Presentation>,
723 }
724 
725 
726 impl fmt::Debug for Surface {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result727     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
728         fmt.write_str("Surface")
729     }
730 }
731 
732 unsafe impl Send for Surface {}
733 unsafe impl Sync for Surface {}
734 
735 impl window::Surface<Backend> for Surface {
supports_queue_family(&self, _queue_family: &QueueFamily) -> bool736     fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool {
737         true
738     }
739 
capabilities(&self, _physical_device: &PhysicalDevice) -> window::SurfaceCapabilities740     fn capabilities(&self, _physical_device: &PhysicalDevice) -> window::SurfaceCapabilities {
741         let current_extent = unsafe {
742             let mut rect: RECT = mem::zeroed();
743             assert_ne!(
744                 0,
745                 GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT)
746             );
747             Some(window::Extent2D {
748                 width: (rect.right - rect.left) as u32,
749                 height: (rect.bottom - rect.top) as u32,
750             })
751         };
752 
753         // TODO: flip swap effects require dx11.1/windows8
754         // NOTE: some swap effects affect msaa capabilities..
755         // TODO: _DISCARD swap effects can only have one image?
756         window::SurfaceCapabilities {
757             present_modes: window::PresentMode::FIFO, //TODO
758             composite_alpha_modes: window::CompositeAlphaMode::OPAQUE, //TODO
759             image_count: 1 ..= 16, // TODO:
760             current_extent,
761             extents: window::Extent2D {
762                 width: 16,
763                 height: 16,
764             } ..= window::Extent2D {
765                 width: 4096,
766                 height: 4096,
767             },
768             max_image_layers: 1,
769             usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
770         }
771     }
772 
supported_formats(&self, _physical_device: &PhysicalDevice) -> Option<Vec<format::Format>>773     fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option<Vec<format::Format>> {
774          Some(vec![
775             format::Format::Bgra8Srgb,
776             format::Format::Bgra8Unorm,
777             format::Format::Rgba8Srgb,
778             format::Format::Rgba8Unorm,
779             format::Format::A2b10g10r10Unorm,
780             format::Format::Rgba16Sfloat,
781         ])
782     }
783 }
784 
785 impl window::PresentationSurface<Backend> for Surface {
786     type SwapchainImage = ImageView;
787 
configure_swapchain( &mut self, device: &device::Device, config: window::SwapchainConfig, ) -> Result<(), window::CreationError>788     unsafe fn configure_swapchain(
789         &mut self,
790         device: &device::Device,
791         config: window::SwapchainConfig,
792     ) -> Result<(), window::CreationError> {
793         assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage));
794 
795         let swapchain = match self.presentation.take() {
796             Some(present) => {
797                 if present.format == config.format && present.size == config.extent {
798                     self.presentation = Some(present);
799                     return Ok(());
800                 }
801                 let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap();
802                 drop(present.view);
803                 let result = present.swapchain.ResizeBuffers(
804                     config.image_count,
805                     config.extent.width,
806                     config.extent.height,
807                     non_srgb_format,
808                     0,
809                 );
810                 if result != winerror::S_OK {
811                     error!("ResizeBuffers failed with 0x{:x}", result as u32);
812                     return Err(window::CreationError::WindowInUse(hal::device::WindowInUse));
813                 }
814                 present.swapchain
815             }
816             None => {
817                 let (swapchain, _) =
818                     device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?;
819                 swapchain
820             }
821         };
822 
823         let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut();
824         assert_eq!(
825             winerror::S_OK,
826             swapchain.GetBuffer(
827                 0 as _,
828                 &d3d11::ID3D11Resource::uuidof(),
829                 &mut resource as *mut *mut _ as *mut *mut _,
830             )
831         );
832 
833         let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1);
834         let format = conv::map_format(config.format).unwrap();
835         let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(format);
836 
837         let view_info = ViewInfo {
838             resource,
839             kind,
840             caps: image::ViewCapabilities::empty(),
841             view_kind: image::ViewKind::D2,
842             format: decomposed.rtv.unwrap(),
843             range: image::SubresourceRange {
844                 aspects: format::Aspects::COLOR,
845                 levels: 0 .. 1,
846                 layers: 0 .. 1,
847             },
848         };
849         let view = device.view_image_as_render_target(&view_info).unwrap();
850 
851         (*resource).Release();
852 
853         self.presentation = Some(Presentation {
854             swapchain,
855             view,
856             format: config.format,
857             size: config.extent,
858         });
859         Ok(())
860     }
861 
unconfigure_swapchain(&mut self, _device: &device::Device)862     unsafe fn unconfigure_swapchain(&mut self, _device: &device::Device) {
863         self.presentation = None;
864     }
865 
acquire_image( &mut self, _timeout_ns: u64, ) -> Result<(ImageView, Option<window::Suboptimal>), window::AcquireError>866     unsafe fn acquire_image(
867         &mut self,
868         _timeout_ns: u64, //TODO: use the timeout
869     ) -> Result<(ImageView, Option<window::Suboptimal>), window::AcquireError> {
870         let present = self.presentation.as_ref().unwrap();
871         let image_view = ImageView {
872             format: present.format,
873             rtv_handle: Some(present.view.clone()),
874             dsv_handle: None,
875             srv_handle: None,
876             uav_handle: None,
877         };
878         Ok((image_view, None))
879     }
880 }
881 
882 
883 pub struct Swapchain {
884     dxgi_swapchain: ComPtr<IDXGISwapChain>,
885 }
886 
887 
888 impl fmt::Debug for Swapchain {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result889     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
890         fmt.write_str("Swapchain")
891     }
892 }
893 
894 unsafe impl Send for Swapchain {}
895 unsafe impl Sync for Swapchain {}
896 
897 impl window::Swapchain<Backend> for Swapchain {
acquire_image( &mut self, _timeout_ns: u64, _semaphore: Option<&Semaphore>, _fence: Option<&Fence>, ) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError>898     unsafe fn acquire_image(
899         &mut self,
900         _timeout_ns: u64,
901         _semaphore: Option<&Semaphore>,
902         _fence: Option<&Fence>,
903     ) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
904         // TODO: non-`_DISCARD` swap effects have more than one buffer, `FLIP`
905         //       effects are dxgi 1.3 (w10+?) in which case there is
906         //       `GetCurrentBackBufferIndex()` on the swapchain
907         Ok((0, None))
908     }
909 }
910 
911 #[derive(Debug, Clone, Copy)]
912 pub struct QueueFamily;
913 
914 impl queue::QueueFamily for QueueFamily {
queue_type(&self) -> queue::QueueType915     fn queue_type(&self) -> queue::QueueType {
916         queue::QueueType::General
917     }
max_queues(&self) -> usize918     fn max_queues(&self) -> usize {
919         1
920     }
id(&self) -> queue::QueueFamilyId921     fn id(&self) -> queue::QueueFamilyId {
922         queue::QueueFamilyId(0)
923     }
924 }
925 
926 #[derive(Clone)]
927 pub struct CommandQueue {
928     context: ComPtr<d3d11::ID3D11DeviceContext>,
929 }
930 
931 impl fmt::Debug for CommandQueue {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result932     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
933         fmt.write_str("CommandQueue")
934     }
935 }
936 
937 unsafe impl Send for CommandQueue {}
938 unsafe impl Sync for CommandQueue {}
939 
940 impl queue::CommandQueue<Backend> for CommandQueue {
submit<'a, T, Ic, S, Iw, Is>( &mut self, submission: queue::Submission<Ic, Iw, Is>, fence: Option<&Fence>, ) where T: 'a + Borrow<CommandBuffer>, Ic: IntoIterator<Item = &'a T>, S: 'a + Borrow<Semaphore>, Iw: IntoIterator<Item = (&'a S, pso::PipelineStage)>, Is: IntoIterator<Item = &'a S>,941     unsafe fn submit<'a, T, Ic, S, Iw, Is>(
942         &mut self,
943         submission: queue::Submission<Ic, Iw, Is>,
944         fence: Option<&Fence>,
945     ) where
946         T: 'a + Borrow<CommandBuffer>,
947         Ic: IntoIterator<Item = &'a T>,
948         S: 'a + Borrow<Semaphore>,
949         Iw: IntoIterator<Item = (&'a S, pso::PipelineStage)>,
950         Is: IntoIterator<Item = &'a S>,
951     {
952         let _scope = debug_scope!(&self.context, "Submit(fence={:?})", fence);
953         for cmd_buf in submission.command_buffers {
954             let cmd_buf = cmd_buf.borrow();
955 
956             let _scope = debug_scope!(
957                 &self.context,
958                 "CommandBuffer ({}/{})",
959                 cmd_buf.flush_coherent_memory.len(),
960                 cmd_buf.invalidate_coherent_memory.len()
961             );
962 
963             {
964                 let _scope = debug_scope!(&self.context, "Pre-Exec: Flush");
965                 for sync in &cmd_buf.flush_coherent_memory {
966                     sync.do_flush(&self.context);
967                 }
968             }
969             self.context
970                 .ExecuteCommandList(cmd_buf.as_raw_list().as_raw(), FALSE);
971             {
972                 let _scope = debug_scope!(&self.context, "Post-Exec: Invalidate");
973                 for sync in &cmd_buf.invalidate_coherent_memory {
974                     sync.do_invalidate(&self.context);
975                 }
976             }
977         }
978 
979         if let Some(fence) = fence {
980             *fence.mutex.lock() = true;
981             fence.condvar.notify_all();
982         }
983     }
984 
present<'a, W, Is, S, Iw>( &mut self, swapchains: Is, _wait_semaphores: Iw, ) -> Result<Option<window::Suboptimal>, window::PresentError> where W: 'a + Borrow<Swapchain>, Is: IntoIterator<Item = (&'a W, window::SwapImageIndex)>, S: 'a + Borrow<Semaphore>, Iw: IntoIterator<Item = &'a S>,985     unsafe fn present<'a, W, Is, S, Iw>(
986         &mut self,
987         swapchains: Is,
988         _wait_semaphores: Iw,
989     ) -> Result<Option<window::Suboptimal>, window::PresentError>
990     where
991         W: 'a + Borrow<Swapchain>,
992         Is: IntoIterator<Item = (&'a W, window::SwapImageIndex)>,
993         S: 'a + Borrow<Semaphore>,
994         Iw: IntoIterator<Item = &'a S>,
995     {
996         for (swapchain, _idx) in swapchains {
997             swapchain.borrow().dxgi_swapchain.Present(1, 0);
998         }
999 
1000         Ok(None)
1001     }
1002 
present_surface( &mut self, surface: &mut Surface, _image: ImageView, _wait_semaphore: Option<&Semaphore>, ) -> Result<Option<window::Suboptimal>, window::PresentError>1003     unsafe fn present_surface(
1004         &mut self,
1005         surface: &mut Surface,
1006         _image: ImageView,
1007         _wait_semaphore: Option<&Semaphore>,
1008     ) -> Result<Option<window::Suboptimal>, window::PresentError> {
1009         surface
1010             .presentation
1011             .as_ref()
1012             .unwrap()
1013             .swapchain
1014             .Present(1, 0);
1015         Ok(None)
1016     }
1017 
wait_idle(&self) -> Result<(), hal::device::OutOfMemory>1018     fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> {
1019         // unimplemented!()
1020         Ok(())
1021     }
1022 }
1023 
1024 #[derive(Debug)]
1025 pub struct AttachmentClear {
1026     subpass_id: Option<pass::SubpassId>,
1027     attachment_id: usize,
1028     raw: command::AttachmentClear,
1029 }
1030 
1031 #[derive(Debug)]
1032 pub struct RenderPassCache {
1033     pub render_pass: RenderPass,
1034     pub framebuffer: Framebuffer,
1035     pub attachment_clear_values: Vec<AttachmentClear>,
1036     pub target_rect: pso::Rect,
1037     pub current_subpass: usize,
1038 }
1039 
1040 impl RenderPassCache {
start_subpass( &mut self, internal: &mut internal::Internal, context: &ComPtr<d3d11::ID3D11DeviceContext>, cache: &mut CommandBufferState, )1041     pub fn start_subpass(
1042         &mut self,
1043         internal: &mut internal::Internal,
1044         context: &ComPtr<d3d11::ID3D11DeviceContext>,
1045         cache: &mut CommandBufferState,
1046     ) {
1047         let attachments = self
1048             .attachment_clear_values
1049             .iter()
1050             .filter(|clear| clear.subpass_id == Some(self.current_subpass))
1051             .map(|clear| clear.raw);
1052 
1053         cache
1054             .dirty_flag
1055             .insert(DirtyStateFlag::GRAPHICS_PIPELINE | DirtyStateFlag::VIEWPORTS);
1056         internal.clear_attachments(
1057             context,
1058             attachments,
1059             &[pso::ClearRect {
1060                 rect: self.target_rect,
1061                 layers: 0 .. 1,
1062             }],
1063             &self,
1064         );
1065 
1066         let subpass = &self.render_pass.subpasses[self.current_subpass];
1067         let color_views = subpass
1068             .color_attachments
1069             .iter()
1070             .map(|&(id, _)| {
1071                 self.framebuffer.attachments[id]
1072                     .rtv_handle
1073                     .clone()
1074                     .unwrap()
1075                     .as_raw()
1076             })
1077             .collect::<Vec<_>>();
1078         let ds_view = match subpass.depth_stencil_attachment {
1079             Some((id, _)) => Some(
1080                 self.framebuffer.attachments[id]
1081                     .dsv_handle
1082                     .clone()
1083                     .unwrap()
1084                     .as_raw(),
1085             ),
1086             None => None,
1087         };
1088 
1089         cache.set_render_targets(&color_views, ds_view);
1090         cache.bind(context);
1091     }
1092 
next_subpass(&mut self)1093     pub fn next_subpass(&mut self) {
1094         self.current_subpass += 1;
1095     }
1096 }
1097 
1098 bitflags! {
1099     struct DirtyStateFlag : u32 {
1100         const RENDER_TARGETS = (1 << 1);
1101         const VERTEX_BUFFERS = (1 << 2);
1102         const GRAPHICS_PIPELINE = (1 << 3);
1103         const VIEWPORTS = (1 << 4);
1104         const BLEND_STATE = (1 << 5);
1105     }
1106 }
1107 
1108 pub struct CommandBufferState {
1109     dirty_flag: DirtyStateFlag,
1110 
1111     render_target_len: u32,
1112     render_targets: [*mut d3d11::ID3D11RenderTargetView; 8],
1113     depth_target: Option<*mut d3d11::ID3D11DepthStencilView>,
1114     graphics_pipeline: Option<GraphicsPipeline>,
1115 
1116     // a bitmask that keeps track of what vertex buffer bindings have been "bound" into
1117     // our vec
1118     bound_bindings: u32,
1119     // a bitmask that hold the required binding slots to be bound for the currently
1120     // bound pipeline
1121     required_bindings: Option<u32>,
1122     // the highest binding number in currently bound pipeline
1123     max_bindings: Option<u32>,
1124     viewports: Vec<d3d11::D3D11_VIEWPORT>,
1125     vertex_buffers: Vec<*mut d3d11::ID3D11Buffer>,
1126     vertex_offsets: Vec<u32>,
1127     vertex_strides: Vec<u32>,
1128     blend_factor: Option<[f32; 4]>,
1129     // we can only support one face (rather, both faces must have the same value)
1130     stencil_ref: Option<pso::StencilValue>,
1131     stencil_read_mask: Option<pso::StencilValue>,
1132     stencil_write_mask: Option<pso::StencilValue>,
1133     current_blend: Option<*mut d3d11::ID3D11BlendState>,
1134 }
1135 
1136 
1137 impl fmt::Debug for CommandBufferState {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result1138     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1139         fmt.write_str("CommandBufferState")
1140     }
1141 }
1142 
1143 impl CommandBufferState {
new() -> Self1144     fn new() -> Self {
1145         CommandBufferState {
1146             dirty_flag: DirtyStateFlag::empty(),
1147             render_target_len: 0,
1148             render_targets: [ptr::null_mut(); 8],
1149             depth_target: None,
1150             graphics_pipeline: None,
1151             bound_bindings: 0,
1152             required_bindings: None,
1153             max_bindings: None,
1154             viewports: Vec::new(),
1155             vertex_buffers: Vec::new(),
1156             vertex_offsets: Vec::new(),
1157             vertex_strides: Vec::new(),
1158             blend_factor: None,
1159             stencil_ref: None,
1160             stencil_read_mask: None,
1161             stencil_write_mask: None,
1162             current_blend: None,
1163         }
1164     }
1165 
clear(&mut self)1166     fn clear(&mut self) {
1167         self.render_target_len = 0;
1168         self.depth_target = None;
1169         self.graphics_pipeline = None;
1170         self.bound_bindings = 0;
1171         self.required_bindings = None;
1172         self.max_bindings = None;
1173         self.viewports.clear();
1174         self.vertex_buffers.clear();
1175         self.vertex_offsets.clear();
1176         self.vertex_strides.clear();
1177         self.blend_factor = None;
1178         self.stencil_ref = None;
1179         self.stencil_read_mask = None;
1180         self.stencil_write_mask = None;
1181         self.current_blend = None;
1182     }
1183 
set_vertex_buffer( &mut self, index: usize, offset: u32, buffer: *mut d3d11::ID3D11Buffer, )1184     pub fn set_vertex_buffer(
1185         &mut self,
1186         index: usize,
1187         offset: u32,
1188         buffer: *mut d3d11::ID3D11Buffer,
1189     ) {
1190         self.bound_bindings |= 1 << index as u32;
1191 
1192         if index >= self.vertex_buffers.len() {
1193             self.vertex_buffers.push(buffer);
1194             self.vertex_offsets.push(offset);
1195         } else {
1196             self.vertex_buffers[index] = buffer;
1197             self.vertex_offsets[index] = offset;
1198         }
1199 
1200         self.dirty_flag.insert(DirtyStateFlag::VERTEX_BUFFERS);
1201     }
1202 
bind_vertex_buffers(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1203     pub fn bind_vertex_buffers(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1204         if let Some(binding_count) = self.max_bindings {
1205             if self.vertex_buffers.len() >= binding_count as usize
1206                 && self.vertex_strides.len() >= binding_count as usize
1207             {
1208                 unsafe {
1209                     context.IASetVertexBuffers(
1210                         0,
1211                         binding_count,
1212                         self.vertex_buffers.as_ptr(),
1213                         self.vertex_strides.as_ptr(),
1214                         self.vertex_offsets.as_ptr(),
1215                     );
1216                 }
1217 
1218                 self.dirty_flag.remove(DirtyStateFlag::VERTEX_BUFFERS);
1219             }
1220         }
1221     }
1222 
set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT])1223     pub fn set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT]) {
1224         self.viewports.clear();
1225         self.viewports.extend(viewports);
1226 
1227         self.dirty_flag.insert(DirtyStateFlag::VIEWPORTS);
1228     }
1229 
bind_viewports(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1230     pub fn bind_viewports(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1231         if let Some(ref pipeline) = self.graphics_pipeline {
1232             if let Some(ref viewport) = pipeline.baked_states.viewport {
1233                 unsafe {
1234                     context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr());
1235                 }
1236             } else {
1237                 unsafe {
1238                     context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr());
1239                 }
1240             }
1241         } else {
1242             unsafe {
1243                 context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr());
1244             }
1245         }
1246 
1247         self.dirty_flag.remove(DirtyStateFlag::VIEWPORTS);
1248     }
1249 
set_render_targets( &mut self, render_targets: &[*mut d3d11::ID3D11RenderTargetView], depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, )1250     pub fn set_render_targets(
1251         &mut self,
1252         render_targets: &[*mut d3d11::ID3D11RenderTargetView],
1253         depth_target: Option<*mut d3d11::ID3D11DepthStencilView>,
1254     ) {
1255         for (idx, &rt) in render_targets.iter().enumerate() {
1256             self.render_targets[idx] = rt;
1257         }
1258 
1259         self.render_target_len = render_targets.len() as u32;
1260         self.depth_target = depth_target;
1261 
1262         self.dirty_flag.insert(DirtyStateFlag::RENDER_TARGETS);
1263     }
1264 
bind_render_targets(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1265     pub fn bind_render_targets(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1266         unsafe {
1267             context.OMSetRenderTargets(
1268                 self.render_target_len,
1269                 self.render_targets.as_ptr(),
1270                 if let Some(dsv) = self.depth_target {
1271                     dsv
1272                 } else {
1273                     ptr::null_mut()
1274                 },
1275             );
1276         }
1277 
1278         self.dirty_flag.remove(DirtyStateFlag::RENDER_TARGETS);
1279     }
1280 
set_blend_factor(&mut self, factor: [f32; 4])1281     pub fn set_blend_factor(&mut self, factor: [f32; 4]) {
1282         self.blend_factor = Some(factor);
1283 
1284         self.dirty_flag.insert(DirtyStateFlag::BLEND_STATE);
1285     }
1286 
bind_blend_state(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1287     pub fn bind_blend_state(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1288         if let Some(blend) = self.current_blend {
1289             let blend_color = if let Some(ref pipeline) = self.graphics_pipeline {
1290                 pipeline
1291                     .baked_states
1292                     .blend_color
1293                     .or(self.blend_factor)
1294                     .unwrap_or([0f32; 4])
1295             } else {
1296                 self.blend_factor.unwrap_or([0f32; 4])
1297             };
1298 
1299             // TODO: MSAA
1300             unsafe {
1301                 context.OMSetBlendState(blend, &blend_color, !0);
1302             }
1303 
1304             self.dirty_flag.remove(DirtyStateFlag::BLEND_STATE);
1305         }
1306     }
1307 
set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline)1308     pub fn set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline) {
1309         self.graphics_pipeline = Some(pipeline);
1310 
1311         self.dirty_flag.insert(DirtyStateFlag::GRAPHICS_PIPELINE);
1312     }
1313 
bind_graphics_pipeline(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1314     pub fn bind_graphics_pipeline(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1315         if let Some(ref pipeline) = self.graphics_pipeline {
1316             self.vertex_strides.clear();
1317             self.vertex_strides.extend(&pipeline.strides);
1318 
1319             self.required_bindings = Some(pipeline.required_bindings);
1320             self.max_bindings = Some(pipeline.max_vertex_bindings);
1321         };
1322 
1323         self.bind_vertex_buffers(context);
1324 
1325         if let Some(ref pipeline) = self.graphics_pipeline {
1326             unsafe {
1327                 context.IASetPrimitiveTopology(pipeline.topology);
1328                 context.IASetInputLayout(pipeline.input_layout.as_raw());
1329 
1330                 context.VSSetShader(pipeline.vs.as_raw(), ptr::null_mut(), 0);
1331                 if let Some(ref ps) = pipeline.ps {
1332                     context.PSSetShader(ps.as_raw(), ptr::null_mut(), 0);
1333                 }
1334                 if let Some(ref gs) = pipeline.gs {
1335                     context.GSSetShader(gs.as_raw(), ptr::null_mut(), 0);
1336                 }
1337                 if let Some(ref hs) = pipeline.hs {
1338                     context.HSSetShader(hs.as_raw(), ptr::null_mut(), 0);
1339                 }
1340                 if let Some(ref ds) = pipeline.ds {
1341                     context.DSSetShader(ds.as_raw(), ptr::null_mut(), 0);
1342                 }
1343 
1344                 context.RSSetState(pipeline.rasterizer_state.as_raw());
1345                 if let Some(ref viewport) = pipeline.baked_states.viewport {
1346                     context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr());
1347                 }
1348                 if let Some(ref scissor) = pipeline.baked_states.scissor {
1349                     context.RSSetScissorRects(1, [conv::map_rect(&scissor)].as_ptr());
1350                 }
1351 
1352                 if let Some((ref state, reference)) = pipeline.depth_stencil_state {
1353                     let stencil_ref = if let pso::State::Static(reference) = reference {
1354                         reference
1355                     } else {
1356                         self.stencil_ref.unwrap_or(0)
1357                     };
1358 
1359                     context.OMSetDepthStencilState(state.as_raw(), stencil_ref);
1360                 }
1361                 self.current_blend = Some(pipeline.blend_state.as_raw());
1362             }
1363         };
1364 
1365         self.bind_blend_state(context);
1366 
1367         self.dirty_flag.remove(DirtyStateFlag::GRAPHICS_PIPELINE);
1368     }
1369 
bind(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>)1370     pub fn bind(&mut self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
1371         if self.dirty_flag.contains(DirtyStateFlag::RENDER_TARGETS) {
1372             self.bind_render_targets(context);
1373         }
1374 
1375         if self.dirty_flag.contains(DirtyStateFlag::GRAPHICS_PIPELINE) {
1376             self.bind_graphics_pipeline(context);
1377         }
1378 
1379         if self.dirty_flag.contains(DirtyStateFlag::VERTEX_BUFFERS) {
1380             self.bind_vertex_buffers(context);
1381         }
1382 
1383         if self.dirty_flag.contains(DirtyStateFlag::VIEWPORTS) {
1384             self.bind_viewports(context);
1385         }
1386     }
1387 }
1388 
1389 pub struct CommandBuffer {
1390     // TODO: better way of sharing
1391     internal: internal::Internal,
1392     context: ComPtr<d3d11::ID3D11DeviceContext>,
1393     list: RefCell<Option<ComPtr<d3d11::ID3D11CommandList>>>,
1394 
1395     // since coherent memory needs to be synchronized at submission, we need to gather up all
1396     // coherent resources that are used in the command buffer and flush/invalidate them accordingly
1397     // before executing.
1398     flush_coherent_memory: Vec<MemoryFlush>,
1399     invalidate_coherent_memory: Vec<MemoryInvalidate>,
1400 
1401     // holds information about the active render pass
1402     render_pass_cache: Option<RenderPassCache>,
1403 
1404     cache: CommandBufferState,
1405 
1406     one_time_submit: bool,
1407 }
1408 
1409 impl fmt::Debug for CommandBuffer {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result1410     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1411         fmt.write_str("CommandBuffer")
1412     }
1413 }
1414 
1415 unsafe impl Send for CommandBuffer {}
1416 unsafe impl Sync for CommandBuffer {}
1417 
1418 impl CommandBuffer {
create_deferred(device: ComPtr<d3d11::ID3D11Device>, internal: internal::Internal) -> Self1419     fn create_deferred(device: ComPtr<d3d11::ID3D11Device>, internal: internal::Internal) -> Self {
1420         let mut context: *mut d3d11::ID3D11DeviceContext = ptr::null_mut();
1421         let hr =
1422             unsafe { device.CreateDeferredContext(0, &mut context as *mut *mut _ as *mut *mut _) };
1423         assert_eq!(hr, winerror::S_OK);
1424 
1425         CommandBuffer {
1426             internal,
1427             context: unsafe { ComPtr::from_raw(context) },
1428             list: RefCell::new(None),
1429             flush_coherent_memory: Vec::new(),
1430             invalidate_coherent_memory: Vec::new(),
1431             render_pass_cache: None,
1432             cache: CommandBufferState::new(),
1433             one_time_submit: false,
1434         }
1435     }
1436 
as_raw_list(&self) -> ComPtr<d3d11::ID3D11CommandList>1437     fn as_raw_list(&self) -> ComPtr<d3d11::ID3D11CommandList> {
1438         if self.one_time_submit {
1439             self.list.replace(None).unwrap()
1440         } else {
1441             self.list.borrow().clone().unwrap()
1442         }
1443     }
1444 
bind_vertex_descriptor( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, binding: &PipelineBinding, handles: *mut Descriptor, )1445     unsafe fn bind_vertex_descriptor(
1446         &self,
1447         context: &ComPtr<d3d11::ID3D11DeviceContext>,
1448         binding: &PipelineBinding,
1449         handles: *mut Descriptor,
1450     ) {
1451         use pso::DescriptorType::*;
1452 
1453         let handles = handles.offset(binding.handle_offset as isize);
1454         let start = binding.binding_range.start as UINT;
1455         let len = binding.binding_range.end as UINT - start;
1456 
1457         match binding.ty {
1458             Sampler => context.VSSetSamplers(start, len, handles as *const *mut _ as *const *mut _),
1459             SampledImage | InputAttachment => {
1460                 context.VSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _)
1461             }
1462             CombinedImageSampler => {
1463                 context.VSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _);
1464                 context.VSSetSamplers(
1465                     start,
1466                     len,
1467                     handles.offset(1) as *const *mut _ as *const *mut _,
1468                 );
1469             }
1470             UniformBuffer | UniformBufferDynamic => {
1471                 context.VSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _)
1472             }
1473             _ => {}
1474         }
1475     }
1476 
bind_fragment_descriptor( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, binding: &PipelineBinding, handles: *mut Descriptor, )1477     unsafe fn bind_fragment_descriptor(
1478         &self,
1479         context: &ComPtr<d3d11::ID3D11DeviceContext>,
1480         binding: &PipelineBinding,
1481         handles: *mut Descriptor,
1482     ) {
1483         use pso::DescriptorType::*;
1484 
1485         let handles = handles.offset(binding.handle_offset as isize);
1486         let start = binding.binding_range.start as UINT;
1487         let len = binding.binding_range.end as UINT - start;
1488 
1489         match binding.ty {
1490             Sampler => context.PSSetSamplers(start, len, handles as *const *mut _ as *const *mut _),
1491             SampledImage | InputAttachment => {
1492                 context.PSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _)
1493             }
1494             CombinedImageSampler => {
1495                 context.PSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _);
1496                 context.PSSetSamplers(
1497                     start,
1498                     len,
1499                     handles.offset(1) as *const *mut _ as *const *mut _,
1500                 );
1501             }
1502             UniformBuffer | UniformBufferDynamic => {
1503                 context.PSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _)
1504             }
1505             _ => {}
1506         }
1507     }
1508 
bind_compute_descriptor( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, binding: &PipelineBinding, handles: *mut Descriptor, )1509     unsafe fn bind_compute_descriptor(
1510         &self,
1511         context: &ComPtr<d3d11::ID3D11DeviceContext>,
1512         binding: &PipelineBinding,
1513         handles: *mut Descriptor,
1514     ) {
1515         use pso::DescriptorType::*;
1516 
1517         let handles = handles.offset(binding.handle_offset as isize);
1518         let start = binding.binding_range.start as UINT;
1519         let len = binding.binding_range.end as UINT - start;
1520 
1521         match binding.ty {
1522             Sampler => context.CSSetSamplers(start, len, handles as *const *mut _ as *const *mut _),
1523             SampledImage | InputAttachment => {
1524                 context.CSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _)
1525             }
1526             CombinedImageSampler => {
1527                 context.CSSetShaderResources(start, len, handles as *const *mut _ as *const *mut _);
1528                 context.CSSetSamplers(
1529                     start,
1530                     len,
1531                     handles.offset(1) as *const *mut _ as *const *mut _,
1532                 );
1533             }
1534             UniformBuffer | UniformBufferDynamic => {
1535                 context.CSSetConstantBuffers(start, len, handles as *const *mut _ as *const *mut _)
1536             }
1537             StorageImage | StorageBuffer => context.CSSetUnorderedAccessViews(
1538                 start,
1539                 len,
1540                 handles as *const *mut _ as *const *mut _,
1541                 ptr::null_mut(),
1542             ),
1543             _ => unimplemented!(),
1544         }
1545     }
1546 
bind_descriptor( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, binding: &PipelineBinding, handles: *mut Descriptor, )1547     fn bind_descriptor(
1548         &self,
1549         context: &ComPtr<d3d11::ID3D11DeviceContext>,
1550         binding: &PipelineBinding,
1551         handles: *mut Descriptor,
1552     ) {
1553         //use pso::ShaderStageFlags::*;
1554 
1555         unsafe {
1556             if binding.stage.contains(pso::ShaderStageFlags::VERTEX) {
1557                 self.bind_vertex_descriptor(context, binding, handles);
1558             }
1559 
1560             if binding.stage.contains(pso::ShaderStageFlags::FRAGMENT) {
1561                 self.bind_fragment_descriptor(context, binding, handles);
1562             }
1563 
1564             if binding.stage.contains(pso::ShaderStageFlags::COMPUTE) {
1565                 self.bind_compute_descriptor(context, binding, handles);
1566             }
1567         }
1568     }
1569 
defer_coherent_flush(&mut self, buffer: &Buffer)1570     fn defer_coherent_flush(&mut self, buffer: &Buffer) {
1571         if !self
1572             .flush_coherent_memory
1573             .iter()
1574             .any(|m| m.buffer == buffer.internal.raw)
1575         {
1576             self.flush_coherent_memory.push(MemoryFlush {
1577                 host_memory: buffer.host_ptr,
1578                 sync_range: SyncRange::Whole,
1579                 buffer: buffer.internal.raw,
1580             });
1581         }
1582     }
1583 
defer_coherent_invalidate(&mut self, buffer: &Buffer)1584     fn defer_coherent_invalidate(&mut self, buffer: &Buffer) {
1585         if !self
1586             .invalidate_coherent_memory
1587             .iter()
1588             .any(|m| m.buffer == buffer.internal.raw)
1589         {
1590             self.invalidate_coherent_memory.push(MemoryInvalidate {
1591                 working_buffer: Some(self.internal.working_buffer.clone()),
1592                 working_buffer_size: self.internal.working_buffer_size,
1593                 host_memory: buffer.host_ptr,
1594                 sync_range: buffer.bound_range.clone(),
1595                 buffer: buffer.internal.raw,
1596             });
1597         }
1598     }
1599 
reset(&mut self)1600     fn reset(&mut self) {
1601         self.flush_coherent_memory.clear();
1602         self.invalidate_coherent_memory.clear();
1603         self.render_pass_cache = None;
1604         self.cache.clear();
1605     }
1606 }
1607 
1608 impl command::CommandBuffer<Backend> for CommandBuffer {
begin( &mut self, flags: command::CommandBufferFlags, _info: command::CommandBufferInheritanceInfo<Backend>, )1609     unsafe fn begin(
1610         &mut self,
1611         flags: command::CommandBufferFlags,
1612         _info: command::CommandBufferInheritanceInfo<Backend>,
1613     ) {
1614         self.one_time_submit = flags.contains(command::CommandBufferFlags::ONE_TIME_SUBMIT);
1615         self.reset();
1616     }
1617 
finish(&mut self)1618     unsafe fn finish(&mut self) {
1619         let mut list = ptr::null_mut();
1620         let hr = self
1621             .context
1622             .FinishCommandList(FALSE, &mut list as *mut *mut _ as *mut *mut _);
1623         assert_eq!(hr, winerror::S_OK);
1624 
1625         self.list.replace(Some(ComPtr::from_raw(list)));
1626     }
1627 
reset(&mut self, _release_resources: bool)1628     unsafe fn reset(&mut self, _release_resources: bool) {
1629         self.reset();
1630     }
1631 
begin_render_pass<T>( &mut self, render_pass: &RenderPass, framebuffer: &Framebuffer, target_rect: pso::Rect, clear_values: T, _first_subpass: command::SubpassContents, ) where T: IntoIterator, T::Item: Borrow<command::ClearValue>,1632     unsafe fn begin_render_pass<T>(
1633         &mut self,
1634         render_pass: &RenderPass,
1635         framebuffer: &Framebuffer,
1636         target_rect: pso::Rect,
1637         clear_values: T,
1638         _first_subpass: command::SubpassContents,
1639     ) where
1640         T: IntoIterator,
1641         T::Item: Borrow<command::ClearValue>,
1642     {
1643         use pass::AttachmentLoadOp as Alo;
1644 
1645         let mut clear_iter = clear_values.into_iter();
1646         let mut attachment_clears = Vec::new();
1647 
1648         for (idx, attachment) in render_pass.attachments.iter().enumerate() {
1649             //let attachment = render_pass.attachments[attachment_ref];
1650             let format = attachment.format.unwrap();
1651 
1652             let subpass_id = render_pass.subpasses.iter().position(|sp| sp.is_using(idx));
1653 
1654             if attachment.has_clears() {
1655                 let value = *clear_iter.next().unwrap().borrow();
1656 
1657                 match (attachment.ops.load, attachment.stencil_ops.load) {
1658                     (Alo::Clear, Alo::Clear) if format.is_depth() => {
1659                         attachment_clears.push(AttachmentClear {
1660                             subpass_id,
1661                             attachment_id: idx,
1662                             raw: command::AttachmentClear::DepthStencil {
1663                                 depth: Some(value.depth_stencil.depth),
1664                                 stencil: Some(value.depth_stencil.stencil),
1665                             },
1666                         });
1667                     }
1668                     (Alo::Clear, Alo::Clear) => {
1669                         attachment_clears.push(AttachmentClear {
1670                             subpass_id,
1671                             attachment_id: idx,
1672                             raw: command::AttachmentClear::Color {
1673                                 index: idx,
1674                                 value: value.color,
1675                             },
1676                         });
1677 
1678                         attachment_clears.push(AttachmentClear {
1679                             subpass_id,
1680                             attachment_id: idx,
1681                             raw: command::AttachmentClear::DepthStencil {
1682                                 depth: None,
1683                                 stencil: Some(value.depth_stencil.stencil),
1684                             },
1685                         });
1686                     }
1687                     (Alo::Clear, _) if format.is_depth() => {
1688                         attachment_clears.push(AttachmentClear {
1689                             subpass_id,
1690                             attachment_id: idx,
1691                             raw: command::AttachmentClear::DepthStencil {
1692                                 depth: Some(value.depth_stencil.depth),
1693                                 stencil: None,
1694                             },
1695                         });
1696                     }
1697                     (Alo::Clear, _) => {
1698                         attachment_clears.push(AttachmentClear {
1699                             subpass_id,
1700                             attachment_id: idx,
1701                             raw: command::AttachmentClear::Color {
1702                                 index: idx,
1703                                 value: value.color,
1704                             },
1705                         });
1706                     }
1707                     (_, Alo::Clear) => {
1708                         attachment_clears.push(AttachmentClear {
1709                             subpass_id,
1710                             attachment_id: idx,
1711                             raw: command::AttachmentClear::DepthStencil {
1712                                 depth: None,
1713                                 stencil: Some(value.depth_stencil.stencil),
1714                             },
1715                         });
1716                     }
1717                     _ => {}
1718                 }
1719             }
1720         }
1721 
1722         self.render_pass_cache = Some(RenderPassCache {
1723             render_pass: render_pass.clone(),
1724             framebuffer: framebuffer.clone(),
1725             attachment_clear_values: attachment_clears,
1726             target_rect,
1727             current_subpass: 0,
1728         });
1729 
1730         if let Some(ref mut current_render_pass) = self.render_pass_cache {
1731             current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache);
1732         }
1733     }
1734 
next_subpass(&mut self, _contents: command::SubpassContents)1735     unsafe fn next_subpass(&mut self, _contents: command::SubpassContents) {
1736         if let Some(ref mut current_render_pass) = self.render_pass_cache {
1737             // TODO: resolve msaa
1738             current_render_pass.next_subpass();
1739             current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache);
1740         }
1741     }
1742 
end_render_pass(&mut self)1743     unsafe fn end_render_pass(&mut self) {
1744         self.context
1745             .OMSetRenderTargets(8, [ptr::null_mut(); 8].as_ptr(), ptr::null_mut());
1746 
1747         self.render_pass_cache = None;
1748     }
1749 
pipeline_barrier<'a, T>( &mut self, _stages: Range<pso::PipelineStage>, _dependencies: memory::Dependencies, _barriers: T, ) where T: IntoIterator, T::Item: Borrow<memory::Barrier<'a, Backend>>,1750     unsafe fn pipeline_barrier<'a, T>(
1751         &mut self,
1752         _stages: Range<pso::PipelineStage>,
1753         _dependencies: memory::Dependencies,
1754         _barriers: T,
1755     ) where
1756         T: IntoIterator,
1757         T::Item: Borrow<memory::Barrier<'a, Backend>>,
1758     {
1759         // TODO: should we track and assert on resource states?
1760         // unimplemented!()
1761     }
1762 
clear_image<T>( &mut self, image: &Image, _: image::Layout, value: command::ClearValue, subresource_ranges: T, ) where T: IntoIterator, T::Item: Borrow<image::SubresourceRange>,1763     unsafe fn clear_image<T>(
1764         &mut self,
1765         image: &Image,
1766         _: image::Layout,
1767         value: command::ClearValue,
1768         subresource_ranges: T,
1769     ) where
1770         T: IntoIterator,
1771         T::Item: Borrow<image::SubresourceRange>,
1772     {
1773         for range in subresource_ranges {
1774             let range = range.borrow();
1775 
1776             // TODO: clear Int/Uint depending on format
1777             if range.aspects.contains(format::Aspects::COLOR) {
1778                 for layer in range.layers.clone() {
1779                     for level in range.levels.clone() {
1780                         self.context.ClearRenderTargetView(
1781                             image.get_rtv(level, layer).unwrap().as_raw(),
1782                             &value.color.float32,
1783                         );
1784                     }
1785                 }
1786             }
1787 
1788             let mut depth_stencil_flags = 0;
1789             if range.aspects.contains(format::Aspects::DEPTH) {
1790                 depth_stencil_flags |= d3d11::D3D11_CLEAR_DEPTH;
1791             }
1792 
1793             if range.aspects.contains(format::Aspects::STENCIL) {
1794                 depth_stencil_flags |= d3d11::D3D11_CLEAR_STENCIL;
1795             }
1796 
1797             if depth_stencil_flags != 0 {
1798                 for layer in range.layers.clone() {
1799                     for level in range.levels.clone() {
1800                         self.context.ClearDepthStencilView(
1801                             image.get_dsv(level, layer).unwrap().as_raw(),
1802                             depth_stencil_flags,
1803                             value.depth_stencil.depth,
1804                             value.depth_stencil.stencil as _,
1805                         );
1806                     }
1807                 }
1808             }
1809         }
1810     }
1811 
clear_attachments<T, U>(&mut self, clears: T, rects: U) where T: IntoIterator, T::Item: Borrow<command::AttachmentClear>, U: IntoIterator, U::Item: Borrow<pso::ClearRect>,1812     unsafe fn clear_attachments<T, U>(&mut self, clears: T, rects: U)
1813     where
1814         T: IntoIterator,
1815         T::Item: Borrow<command::AttachmentClear>,
1816         U: IntoIterator,
1817         U::Item: Borrow<pso::ClearRect>,
1818     {
1819         if let Some(ref pass) = self.render_pass_cache {
1820             self.cache.dirty_flag.insert(
1821                 DirtyStateFlag::GRAPHICS_PIPELINE
1822                     | DirtyStateFlag::VIEWPORTS
1823                     | DirtyStateFlag::RENDER_TARGETS,
1824             );
1825             self.internal
1826                 .clear_attachments(&self.context, clears, rects, pass);
1827             self.cache.bind(&self.context);
1828         } else {
1829             panic!("`clear_attachments` can only be called inside a renderpass")
1830         }
1831     }
1832 
resolve_image<T>( &mut self, _src: &Image, _src_layout: image::Layout, _dst: &Image, _dst_layout: image::Layout, _regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageResolve>,1833     unsafe fn resolve_image<T>(
1834         &mut self,
1835         _src: &Image,
1836         _src_layout: image::Layout,
1837         _dst: &Image,
1838         _dst_layout: image::Layout,
1839         _regions: T,
1840     ) where
1841         T: IntoIterator,
1842         T::Item: Borrow<command::ImageResolve>,
1843     {
1844         unimplemented!()
1845     }
1846 
blit_image<T>( &mut self, src: &Image, _src_layout: image::Layout, dst: &Image, _dst_layout: image::Layout, filter: image::Filter, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageBlit>,1847     unsafe fn blit_image<T>(
1848         &mut self,
1849         src: &Image,
1850         _src_layout: image::Layout,
1851         dst: &Image,
1852         _dst_layout: image::Layout,
1853         filter: image::Filter,
1854         regions: T,
1855     ) where
1856         T: IntoIterator,
1857         T::Item: Borrow<command::ImageBlit>,
1858     {
1859         self.cache
1860             .dirty_flag
1861             .insert(DirtyStateFlag::GRAPHICS_PIPELINE);
1862 
1863         self.internal
1864             .blit_2d_image(&self.context, src, dst, filter, regions);
1865 
1866         self.cache.bind(&self.context);
1867     }
1868 
bind_index_buffer(&mut self, ibv: buffer::IndexBufferView<Backend>)1869     unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView<Backend>) {
1870         self.context.IASetIndexBuffer(
1871             ibv.buffer.internal.raw,
1872             conv::map_index_type(ibv.index_type),
1873             ibv.offset as u32,
1874         );
1875     }
1876 
bind_vertex_buffers<I, T>(&mut self, first_binding: pso::BufferIndex, buffers: I) where I: IntoIterator<Item = (T, buffer::Offset)>, T: Borrow<Buffer>,1877     unsafe fn bind_vertex_buffers<I, T>(&mut self, first_binding: pso::BufferIndex, buffers: I)
1878     where
1879         I: IntoIterator<Item = (T, buffer::Offset)>,
1880         T: Borrow<Buffer>,
1881     {
1882         for (i, (buf, offset)) in buffers.into_iter().enumerate() {
1883             let idx = i + first_binding as usize;
1884             let buf = buf.borrow();
1885 
1886             if buf.ty == MemoryHeapFlags::HOST_COHERENT {
1887                 self.defer_coherent_flush(buf);
1888             }
1889 
1890             self.cache
1891                 .set_vertex_buffer(idx, offset as u32, buf.internal.raw);
1892         }
1893 
1894         self.cache.bind_vertex_buffers(&self.context);
1895     }
1896 
set_viewports<T>(&mut self, _first_viewport: u32, viewports: T) where T: IntoIterator, T::Item: Borrow<pso::Viewport>,1897     unsafe fn set_viewports<T>(&mut self, _first_viewport: u32, viewports: T)
1898     where
1899         T: IntoIterator,
1900         T::Item: Borrow<pso::Viewport>,
1901     {
1902         let viewports = viewports
1903             .into_iter()
1904             .map(|v| {
1905                 let v = v.borrow();
1906                 conv::map_viewport(v)
1907             })
1908             .collect::<Vec<_>>();
1909 
1910         // TODO: DX only lets us set all VPs at once, so cache in slice?
1911         self.cache.set_viewports(&viewports);
1912         self.cache.bind_viewports(&self.context);
1913     }
1914 
set_scissors<T>(&mut self, _first_scissor: u32, scissors: T) where T: IntoIterator, T::Item: Borrow<pso::Rect>,1915     unsafe fn set_scissors<T>(&mut self, _first_scissor: u32, scissors: T)
1916     where
1917         T: IntoIterator,
1918         T::Item: Borrow<pso::Rect>,
1919     {
1920         let scissors = scissors
1921             .into_iter()
1922             .map(|s| {
1923                 let s = s.borrow();
1924                 conv::map_rect(s)
1925             })
1926             .collect::<Vec<_>>();
1927 
1928         // TODO: same as for viewports
1929         self.context
1930             .RSSetScissorRects(scissors.len() as _, scissors.as_ptr());
1931     }
1932 
set_blend_constants(&mut self, color: pso::ColorValue)1933     unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) {
1934         self.cache.set_blend_factor(color);
1935         self.cache.bind_blend_state(&self.context);
1936     }
1937 
set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue)1938     unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue) {
1939         self.cache.stencil_ref = Some(value);
1940     }
1941 
set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue)1942     unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) {
1943         self.cache.stencil_read_mask = Some(value);
1944     }
1945 
set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue)1946     unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) {
1947         self.cache.stencil_write_mask = Some(value);
1948     }
1949 
set_depth_bounds(&mut self, _bounds: Range<f32>)1950     unsafe fn set_depth_bounds(&mut self, _bounds: Range<f32>) {
1951         unimplemented!()
1952     }
1953 
set_line_width(&mut self, width: f32)1954     unsafe fn set_line_width(&mut self, width: f32) {
1955         validate_line_width(width);
1956     }
1957 
set_depth_bias(&mut self, _depth_bias: pso::DepthBias)1958     unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) {
1959         // TODO:
1960         // unimplemented!()
1961     }
1962 
bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline)1963     unsafe fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) {
1964         self.cache.set_graphics_pipeline(pipeline.clone());
1965         self.cache.bind_graphics_pipeline(&self.context);
1966     }
1967 
bind_graphics_descriptor_sets<'a, I, J>( &mut self, layout: &PipelineLayout, first_set: usize, sets: I, _offsets: J, ) where I: IntoIterator, I::Item: Borrow<DescriptorSet>, J: IntoIterator, J::Item: Borrow<command::DescriptorSetOffset>,1968     unsafe fn bind_graphics_descriptor_sets<'a, I, J>(
1969         &mut self,
1970         layout: &PipelineLayout,
1971         first_set: usize,
1972         sets: I,
1973         _offsets: J,
1974     ) where
1975         I: IntoIterator,
1976         I::Item: Borrow<DescriptorSet>,
1977         J: IntoIterator,
1978         J::Item: Borrow<command::DescriptorSetOffset>,
1979     {
1980         let _scope = debug_scope!(&self.context, "BindGraphicsDescriptorSets");
1981 
1982         // TODO: find a better solution to invalidating old bindings..
1983         self.context.CSSetUnorderedAccessViews(
1984             0,
1985             16,
1986             [ptr::null_mut(); 16].as_ptr(),
1987             ptr::null_mut(),
1988         );
1989 
1990         //let offsets: Vec<command::DescriptorSetOffset> = offsets.into_iter().map(|o| *o.borrow()).collect();
1991 
1992         let iter = sets
1993             .into_iter()
1994             .zip(layout.set_bindings.iter().skip(first_set));
1995 
1996         for (set, bindings) in iter {
1997             let set = set.borrow();
1998 
1999             {
2000                 let coherent_buffers = set.coherent_buffers.lock();
2001                 for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() {
2002                     // TODO: merge sync range if a flush already exists
2003                     if !self
2004                         .flush_coherent_memory
2005                         .iter()
2006                         .any(|m| m.buffer == sync.device_buffer)
2007                     {
2008                         self.flush_coherent_memory.push(MemoryFlush {
2009                             host_memory: sync.host_ptr,
2010                             sync_range: sync.range.clone(),
2011                             buffer: sync.device_buffer,
2012                         });
2013                     }
2014                 }
2015 
2016                 for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() {
2017                     if !self
2018                         .invalidate_coherent_memory
2019                         .iter()
2020                         .any(|m| m.buffer == sync.device_buffer)
2021                     {
2022                         self.invalidate_coherent_memory.push(MemoryInvalidate {
2023                             working_buffer: Some(self.internal.working_buffer.clone()),
2024                             working_buffer_size: self.internal.working_buffer_size,
2025                             host_memory: sync.host_ptr,
2026                             sync_range: sync.range.clone(),
2027                             buffer: sync.device_buffer,
2028                         });
2029                     }
2030                 }
2031             }
2032 
2033             // TODO: offsets
2034             for binding in bindings.iter() {
2035                 self.bind_descriptor(&self.context, binding, set.handles);
2036             }
2037         }
2038     }
2039 
bind_compute_pipeline(&mut self, pipeline: &ComputePipeline)2040     unsafe fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) {
2041         self.context
2042             .CSSetShader(pipeline.cs.as_raw(), ptr::null_mut(), 0);
2043     }
2044 
bind_compute_descriptor_sets<I, J>( &mut self, layout: &PipelineLayout, first_set: usize, sets: I, _offsets: J, ) where I: IntoIterator, I::Item: Borrow<DescriptorSet>, J: IntoIterator, J::Item: Borrow<command::DescriptorSetOffset>,2045     unsafe fn bind_compute_descriptor_sets<I, J>(
2046         &mut self,
2047         layout: &PipelineLayout,
2048         first_set: usize,
2049         sets: I,
2050         _offsets: J,
2051     ) where
2052         I: IntoIterator,
2053         I::Item: Borrow<DescriptorSet>,
2054         J: IntoIterator,
2055         J::Item: Borrow<command::DescriptorSetOffset>,
2056     {
2057         let _scope = debug_scope!(&self.context, "BindComputeDescriptorSets");
2058 
2059         self.context.CSSetUnorderedAccessViews(
2060             0,
2061             16,
2062             [ptr::null_mut(); 16].as_ptr(),
2063             ptr::null_mut(),
2064         );
2065         let iter = sets
2066             .into_iter()
2067             .zip(layout.set_bindings.iter().skip(first_set));
2068 
2069         for (set, bindings) in iter {
2070             let set = set.borrow();
2071 
2072             {
2073                 let coherent_buffers = set.coherent_buffers.lock();
2074                 for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() {
2075                     if !self
2076                         .flush_coherent_memory
2077                         .iter()
2078                         .any(|m| m.buffer == sync.device_buffer)
2079                     {
2080                         self.flush_coherent_memory.push(MemoryFlush {
2081                             host_memory: sync.host_ptr,
2082                             sync_range: sync.range.clone(),
2083                             buffer: sync.device_buffer,
2084                         });
2085                     }
2086                 }
2087 
2088                 for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() {
2089                     if !self
2090                         .invalidate_coherent_memory
2091                         .iter()
2092                         .any(|m| m.buffer == sync.device_buffer)
2093                     {
2094                         self.invalidate_coherent_memory.push(MemoryInvalidate {
2095                             working_buffer: Some(self.internal.working_buffer.clone()),
2096                             working_buffer_size: self.internal.working_buffer_size,
2097                             host_memory: sync.host_ptr,
2098                             sync_range: sync.range.clone(),
2099                             buffer: sync.device_buffer,
2100                         });
2101                     }
2102                 }
2103             }
2104 
2105             // TODO: offsets
2106             for binding in bindings.iter() {
2107                 self.bind_descriptor(&self.context, binding, set.handles);
2108             }
2109         }
2110     }
2111 
dispatch(&mut self, count: WorkGroupCount)2112     unsafe fn dispatch(&mut self, count: WorkGroupCount) {
2113         self.context.Dispatch(count[0], count[1], count[2]);
2114     }
2115 
dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset)2116     unsafe fn dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset) {
2117         unimplemented!()
2118     }
2119 
fill_buffer<R>(&mut self, _buffer: &Buffer, _range: R, _data: u32) where R: RangeArg<buffer::Offset>,2120     unsafe fn fill_buffer<R>(&mut self, _buffer: &Buffer, _range: R, _data: u32)
2121     where
2122         R: RangeArg<buffer::Offset>,
2123     {
2124         unimplemented!()
2125     }
2126 
update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8])2127     unsafe fn update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8]) {
2128         unimplemented!()
2129     }
2130 
copy_buffer<T>(&mut self, src: &Buffer, dst: &Buffer, regions: T) where T: IntoIterator, T::Item: Borrow<command::BufferCopy>,2131     unsafe fn copy_buffer<T>(&mut self, src: &Buffer, dst: &Buffer, regions: T)
2132     where
2133         T: IntoIterator,
2134         T::Item: Borrow<command::BufferCopy>,
2135     {
2136         if src.ty == MemoryHeapFlags::HOST_COHERENT {
2137             self.defer_coherent_flush(src);
2138         }
2139 
2140         for region in regions.into_iter() {
2141             let info = region.borrow();
2142             let dst_box = d3d11::D3D11_BOX {
2143                 left: info.src as _,
2144                 top: 0,
2145                 front: 0,
2146                 right: (info.src + info.size) as _,
2147                 bottom: 1,
2148                 back: 1,
2149             };
2150 
2151             self.context.CopySubresourceRegion(
2152                 dst.internal.raw as _,
2153                 0,
2154                 info.dst as _,
2155                 0,
2156                 0,
2157                 src.internal.raw as _,
2158                 0,
2159                 &dst_box,
2160             );
2161 
2162             if let Some(disjoint_cb) = dst.internal.disjoint_cb {
2163                 self.context.CopySubresourceRegion(
2164                     disjoint_cb as _,
2165                     0,
2166                     info.dst as _,
2167                     0,
2168                     0,
2169                     src.internal.raw as _,
2170                     0,
2171                     &dst_box,
2172                 );
2173             }
2174         }
2175     }
2176 
copy_image<T>( &mut self, src: &Image, _: image::Layout, dst: &Image, _: image::Layout, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageCopy>,2177     unsafe fn copy_image<T>(
2178         &mut self,
2179         src: &Image,
2180         _: image::Layout,
2181         dst: &Image,
2182         _: image::Layout,
2183         regions: T,
2184     ) where
2185         T: IntoIterator,
2186         T::Item: Borrow<command::ImageCopy>,
2187     {
2188         self.internal
2189             .copy_image_2d(&self.context, src, dst, regions);
2190     }
2191 
copy_buffer_to_image<T>( &mut self, buffer: &Buffer, image: &Image, _: image::Layout, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::BufferImageCopy>,2192     unsafe fn copy_buffer_to_image<T>(
2193         &mut self,
2194         buffer: &Buffer,
2195         image: &Image,
2196         _: image::Layout,
2197         regions: T,
2198     ) where
2199         T: IntoIterator,
2200         T::Item: Borrow<command::BufferImageCopy>,
2201     {
2202         if buffer.ty == MemoryHeapFlags::HOST_COHERENT {
2203             self.defer_coherent_flush(buffer);
2204         }
2205 
2206         self.internal
2207             .copy_buffer_to_image_2d(&self.context, buffer, image, regions);
2208     }
2209 
copy_image_to_buffer<T>( &mut self, image: &Image, _: image::Layout, buffer: &Buffer, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::BufferImageCopy>,2210     unsafe fn copy_image_to_buffer<T>(
2211         &mut self,
2212         image: &Image,
2213         _: image::Layout,
2214         buffer: &Buffer,
2215         regions: T,
2216     ) where
2217         T: IntoIterator,
2218         T::Item: Borrow<command::BufferImageCopy>,
2219     {
2220         if buffer.ty == MemoryHeapFlags::HOST_COHERENT {
2221             self.defer_coherent_invalidate(buffer);
2222         }
2223 
2224         self.internal
2225             .copy_image_2d_to_buffer(&self.context, image, buffer, regions);
2226     }
2227 
draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>)2228     unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
2229         self.context.DrawInstanced(
2230             vertices.end - vertices.start,
2231             instances.end - instances.start,
2232             vertices.start,
2233             instances.start,
2234         );
2235     }
2236 
draw_indexed( &mut self, indices: Range<IndexCount>, base_vertex: VertexOffset, instances: Range<InstanceCount>, )2237     unsafe fn draw_indexed(
2238         &mut self,
2239         indices: Range<IndexCount>,
2240         base_vertex: VertexOffset,
2241         instances: Range<InstanceCount>,
2242     ) {
2243         self.context.DrawIndexedInstanced(
2244             indices.end - indices.start,
2245             instances.end - instances.start,
2246             indices.start,
2247             base_vertex,
2248             instances.start,
2249         );
2250     }
2251 
draw_indirect( &mut self, _buffer: &Buffer, _offset: buffer::Offset, _draw_count: DrawCount, _stride: u32, )2252     unsafe fn draw_indirect(
2253         &mut self,
2254         _buffer: &Buffer,
2255         _offset: buffer::Offset,
2256         _draw_count: DrawCount,
2257         _stride: u32,
2258     ) {
2259         unimplemented!()
2260     }
2261 
draw_indexed_indirect( &mut self, _buffer: &Buffer, _offset: buffer::Offset, _draw_count: DrawCount, _stride: u32, )2262     unsafe fn draw_indexed_indirect(
2263         &mut self,
2264         _buffer: &Buffer,
2265         _offset: buffer::Offset,
2266         _draw_count: DrawCount,
2267         _stride: u32,
2268     ) {
2269         unimplemented!()
2270     }
2271 
set_event(&mut self, _: &(), _: pso::PipelineStage)2272     unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) {
2273         unimplemented!()
2274     }
2275 
reset_event(&mut self, _: &(), _: pso::PipelineStage)2276     unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) {
2277         unimplemented!()
2278     }
2279 
wait_events<'a, I, J>(&mut self, _: I, _: Range<pso::PipelineStage>, _: J) where I: IntoIterator, I::Item: Borrow<()>, J: IntoIterator, J::Item: Borrow<memory::Barrier<'a, Backend>>,2280     unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range<pso::PipelineStage>, _: J)
2281     where
2282         I: IntoIterator,
2283         I::Item: Borrow<()>,
2284         J: IntoIterator,
2285         J::Item: Borrow<memory::Barrier<'a, Backend>>,
2286     {
2287         unimplemented!()
2288     }
2289 
begin_query(&mut self, _query: query::Query<Backend>, _flags: query::ControlFlags)2290     unsafe fn begin_query(&mut self, _query: query::Query<Backend>, _flags: query::ControlFlags) {
2291         unimplemented!()
2292     }
2293 
end_query(&mut self, _query: query::Query<Backend>)2294     unsafe fn end_query(&mut self, _query: query::Query<Backend>) {
2295         unimplemented!()
2296     }
2297 
reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range<query::Id>)2298     unsafe fn reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range<query::Id>) {
2299         unimplemented!()
2300     }
2301 
copy_query_pool_results( &mut self, _pool: &QueryPool, _queries: Range<query::Id>, _buffer: &Buffer, _offset: buffer::Offset, _stride: buffer::Offset, _flags: query::ResultFlags, )2302     unsafe fn copy_query_pool_results(
2303         &mut self,
2304         _pool: &QueryPool,
2305         _queries: Range<query::Id>,
2306         _buffer: &Buffer,
2307         _offset: buffer::Offset,
2308         _stride: buffer::Offset,
2309         _flags: query::ResultFlags,
2310     ) {
2311         unimplemented!()
2312     }
2313 
write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query<Backend>)2314     unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query<Backend>) {
2315         unimplemented!()
2316     }
2317 
push_graphics_constants( &mut self, _layout: &PipelineLayout, _stages: pso::ShaderStageFlags, _offset: u32, _constants: &[u32], )2318     unsafe fn push_graphics_constants(
2319         &mut self,
2320         _layout: &PipelineLayout,
2321         _stages: pso::ShaderStageFlags,
2322         _offset: u32,
2323         _constants: &[u32],
2324     ) {
2325         // unimplemented!()
2326     }
2327 
push_compute_constants( &mut self, _layout: &PipelineLayout, _offset: u32, _constants: &[u32], )2328     unsafe fn push_compute_constants(
2329         &mut self,
2330         _layout: &PipelineLayout,
2331         _offset: u32,
2332         _constants: &[u32],
2333     ) {
2334         unimplemented!()
2335     }
2336 
execute_commands<'a, T, I>(&mut self, _buffers: I) where T: 'a + Borrow<CommandBuffer>, I: IntoIterator<Item = &'a T>,2337     unsafe fn execute_commands<'a, T, I>(&mut self, _buffers: I)
2338     where
2339         T: 'a + Borrow<CommandBuffer>,
2340         I: IntoIterator<Item = &'a T>,
2341     {
2342         unimplemented!()
2343     }
2344 }
2345 
2346 bitflags! {
2347     struct MemoryHeapFlags: u64 {
2348         const DEVICE_LOCAL = 0x1;
2349         const HOST_VISIBLE = 0x2 | 0x4;
2350         const HOST_COHERENT = 0x2;
2351     }
2352 }
2353 
2354 #[derive(Clone, Debug)]
2355 enum SyncRange {
2356     Whole,
2357     Partial(Range<u64>),
2358 }
2359 
2360 #[derive(Debug)]
2361 pub struct MemoryFlush {
2362     host_memory: *mut u8,
2363     sync_range: SyncRange,
2364     buffer: *mut d3d11::ID3D11Buffer,
2365 }
2366 
2367 pub struct MemoryInvalidate {
2368     working_buffer: Option<ComPtr<d3d11::ID3D11Buffer>>,
2369     working_buffer_size: u64,
2370     host_memory: *mut u8,
2371     sync_range: Range<u64>,
2372     buffer: *mut d3d11::ID3D11Buffer,
2373 }
2374 
2375 impl fmt::Debug for MemoryInvalidate {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2376     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2377         fmt.write_str("MemoryInvalidate")
2378     }
2379 }
2380 
intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>>2381 fn intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
2382     let min = if a.start < b.start { a } else { b };
2383     let max = if min == a { b } else { a };
2384 
2385     if min.end < max.start {
2386         None
2387     } else {
2388         let end = if min.end < max.end { min.end } else { max.end };
2389         Some(max.start .. end)
2390     }
2391 }
2392 
2393 impl MemoryFlush {
do_flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>)2394     fn do_flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
2395         let src = self.host_memory;
2396 
2397         debug_marker!(context, "Flush({:?})", self.sync_range);
2398         let region = if let SyncRange::Partial(range) = &self.sync_range {
2399             Some(d3d11::D3D11_BOX {
2400                 left: range.start as _,
2401                 top: 0,
2402                 front: 0,
2403                 right: range.end as _,
2404                 bottom: 1,
2405                 back: 1,
2406             })
2407         } else {
2408             None
2409         };
2410 
2411         unsafe {
2412             context.UpdateSubresource(
2413                 self.buffer as _,
2414                 0,
2415                 if let Some(region) = region {
2416                     &region
2417                 } else {
2418                     ptr::null_mut()
2419                 },
2420                 src as _,
2421                 0,
2422                 0,
2423             );
2424         }
2425     }
2426 }
2427 
2428 impl MemoryInvalidate {
download( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, buffer: *mut d3d11::ID3D11Buffer, range: Range<u64>, )2429     fn download(
2430         &self,
2431         context: &ComPtr<d3d11::ID3D11DeviceContext>,
2432         buffer: *mut d3d11::ID3D11Buffer,
2433         range: Range<u64>,
2434     ) {
2435         unsafe {
2436             context.CopySubresourceRegion(
2437                 self.working_buffer.clone().unwrap().as_raw() as _,
2438                 0,
2439                 0,
2440                 0,
2441                 0,
2442                 buffer as _,
2443                 0,
2444                 &d3d11::D3D11_BOX {
2445                     left: range.start as _,
2446                     top: 0,
2447                     front: 0,
2448                     right: range.end as _,
2449                     bottom: 1,
2450                     back: 1,
2451                 },
2452             );
2453 
2454             // copy over to our vec
2455             let dst = self.host_memory.offset(range.start as isize);
2456             let src = self.map(&context);
2457             ptr::copy(src, dst, (range.end - range.start) as usize);
2458             self.unmap(&context);
2459         }
2460     }
2461 
do_invalidate(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>)2462     fn do_invalidate(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
2463         let stride = self.working_buffer_size;
2464         let range = &self.sync_range;
2465         let len = range.end - range.start;
2466         let chunks = len / stride;
2467         let remainder = len % stride;
2468 
2469         // we split up the copies into chunks the size of our working buffer
2470         for i in 0 .. chunks {
2471             let offset = range.start + i * stride;
2472             let range = offset .. (offset + stride);
2473 
2474             self.download(context, self.buffer, range);
2475         }
2476 
2477         if remainder != 0 {
2478             self.download(context, self.buffer, (chunks * stride) .. range.end);
2479         }
2480     }
2481 
map(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) -> *mut u82482     fn map(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) -> *mut u8 {
2483         assert_eq!(self.working_buffer.is_some(), true);
2484 
2485         unsafe {
2486             let mut map = mem::zeroed();
2487             let hr = context.Map(
2488                 self.working_buffer.clone().unwrap().as_raw() as _,
2489                 0,
2490                 d3d11::D3D11_MAP_READ,
2491                 0,
2492                 &mut map,
2493             );
2494 
2495             assert_eq!(hr, winerror::S_OK);
2496 
2497             map.pData as _
2498         }
2499     }
2500 
unmap(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>)2501     fn unmap(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>) {
2502         unsafe {
2503             context.Unmap(self.working_buffer.clone().unwrap().as_raw() as _, 0);
2504         }
2505     }
2506 }
2507 
2508 // Since we dont have any heaps to work with directly, everytime we bind a
2509 // buffer/image to memory we allocate a dx11 resource and assign it a range.
2510 //
2511 // `HOST_VISIBLE` memory gets a `Vec<u8>` which covers the entire memory
2512 // range. This forces us to only expose non-coherent memory, as this
2513 // abstraction acts as a "cache" since the "staging buffer" vec is disjoint
2514 // from all the dx11 resources we store in the struct.
2515 pub struct Memory {
2516     ty: MemoryHeapFlags,
2517     properties: memory::Properties,
2518     size: u64,
2519 
2520     mapped_ptr: *mut u8,
2521 
2522     // staging buffer covering the whole memory region, if it's HOST_VISIBLE
2523     host_visible: Option<RefCell<Vec<u8>>>,
2524 
2525     // list of all buffers bound to this memory
2526     local_buffers: RefCell<Vec<(Range<u64>, InternalBuffer)>>,
2527 
2528     // list of all images bound to this memory
2529     local_images: RefCell<Vec<(Range<u64>, InternalImage)>>,
2530 }
2531 
2532 impl fmt::Debug for Memory {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2533     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2534         fmt.write_str("Memory")
2535     }
2536 }
2537 
2538 unsafe impl Send for Memory {}
2539 unsafe impl Sync for Memory {}
2540 
2541 impl Memory {
resolve<R: RangeArg<u64>>(&self, range: &R) -> Range<u64>2542     pub fn resolve<R: RangeArg<u64>>(&self, range: &R) -> Range<u64> {
2543         *range.start().unwrap_or(&0) .. *range.end().unwrap_or(&self.size)
2544     }
2545 
bind_buffer(&self, range: Range<u64>, buffer: InternalBuffer)2546     pub fn bind_buffer(&self, range: Range<u64>, buffer: InternalBuffer) {
2547         self.local_buffers.borrow_mut().push((range, buffer));
2548     }
2549 
flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>, range: Range<u64>)2550     pub fn flush(&self, context: &ComPtr<d3d11::ID3D11DeviceContext>, range: Range<u64>) {
2551         use buffer::Usage;
2552 
2553         for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() {
2554             if let Some(range) = intersection(&range, &buffer_range) {
2555                 let ptr = self.mapped_ptr;
2556 
2557                 // we need to handle 3 cases for updating buffers:
2558                 //
2559                 //   1. if our buffer was created as a `UNIFORM` buffer *and* other usage flags, we
2560                 //      also have a disjoint buffer which only has `D3D11_BIND_CONSTANT_BUFFER` due
2561                 //      to DX11 limitation. we then need to update both the original buffer and the
2562                 //      disjoint one with the *whole* range (TODO: allow for partial updates)
2563                 //
2564                 //   2. if our buffer was created with *only* `UNIFORM` usage we need to upload
2565                 //      the whole range (TODO: allow for partial updates)
2566                 //
2567                 //   3. the general case, without any `UNIFORM` usage has no restrictions on
2568                 //      partial updates, so we upload the specified range
2569                 //
2570                 if buffer.usage.contains(Usage::UNIFORM) && buffer.usage != Usage::UNIFORM {
2571                     MemoryFlush {
2572                         host_memory: unsafe { ptr.offset(buffer_range.start as _) },
2573                         sync_range: SyncRange::Whole,
2574                         buffer: buffer.raw,
2575                     }
2576                     .do_flush(&context);
2577 
2578                     if let Some(disjoint) = buffer.disjoint_cb {
2579                         MemoryFlush {
2580                             host_memory: unsafe { ptr.offset(buffer_range.start as _) },
2581                             sync_range: SyncRange::Whole,
2582                             buffer: disjoint,
2583                         }
2584                         .do_flush(&context);
2585                     }
2586                 } else if buffer.usage == Usage::UNIFORM {
2587                     MemoryFlush {
2588                         host_memory: unsafe { ptr.offset(buffer_range.start as _) },
2589                         sync_range: SyncRange::Whole,
2590                         buffer: buffer.raw,
2591                     }
2592                     .do_flush(&context);
2593                 } else {
2594                     let local_start = range.start - buffer_range.start;
2595                     let local_len = range.end - range.start;
2596 
2597                     MemoryFlush {
2598                         host_memory: unsafe { ptr.offset(range.start as _) },
2599                         sync_range: SyncRange::Partial(local_start .. (local_start + local_len)),
2600                         buffer: buffer.raw,
2601                     }
2602                     .do_flush(&context);
2603                 }
2604             }
2605         }
2606     }
2607 
invalidate( &self, context: &ComPtr<d3d11::ID3D11DeviceContext>, range: Range<u64>, working_buffer: ComPtr<d3d11::ID3D11Buffer>, working_buffer_size: u64, )2608     pub fn invalidate(
2609         &self,
2610         context: &ComPtr<d3d11::ID3D11DeviceContext>,
2611         range: Range<u64>,
2612         working_buffer: ComPtr<d3d11::ID3D11Buffer>,
2613         working_buffer_size: u64,
2614     ) {
2615         for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() {
2616             if let Some(range) = intersection(&range, &buffer_range) {
2617                 MemoryInvalidate {
2618                     working_buffer: Some(working_buffer.clone()),
2619                     working_buffer_size,
2620                     host_memory: self.mapped_ptr,
2621                     sync_range: range.clone(),
2622                     buffer: buffer.raw,
2623                 }
2624                 .do_invalidate(&context);
2625             }
2626         }
2627     }
2628 }
2629 
2630 #[derive(Debug)]
2631 pub struct CommandPool {
2632     device: ComPtr<d3d11::ID3D11Device>,
2633     internal: internal::Internal,
2634 }
2635 
2636 unsafe impl Send for CommandPool {}
2637 unsafe impl Sync for CommandPool {}
2638 
2639 impl hal::pool::CommandPool<Backend> for CommandPool {
reset(&mut self, _release_resources: bool)2640     unsafe fn reset(&mut self, _release_resources: bool) {
2641         //unimplemented!()
2642     }
2643 
allocate_one(&mut self, _level: command::Level) -> CommandBuffer2644     unsafe fn allocate_one(&mut self, _level: command::Level) -> CommandBuffer {
2645         CommandBuffer::create_deferred(self.device.clone(), self.internal.clone())
2646     }
2647 
free<I>(&mut self, _cbufs: I) where I: IntoIterator<Item = CommandBuffer>,2648     unsafe fn free<I>(&mut self, _cbufs: I)
2649     where
2650         I: IntoIterator<Item = CommandBuffer>,
2651     {
2652         // TODO:
2653         // unimplemented!()
2654     }
2655 }
2656 
2657 /// Similarily to dx12 backend, we can handle either precompiled dxbc or spirv
2658 pub enum ShaderModule {
2659     Dxbc(Vec<u8>),
2660     Spirv(Vec<u32>),
2661 }
2662 
2663 // TODO: temporary
2664 impl ::fmt::Debug for ShaderModule {
fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result2665     fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
2666         write!(f, "{}", "ShaderModule { ... }")
2667     }
2668 }
2669 
2670 unsafe impl Send for ShaderModule {}
2671 unsafe impl Sync for ShaderModule {}
2672 
2673 #[derive(Clone, Debug)]
2674 pub struct SubpassDesc {
2675     pub color_attachments: Vec<pass::AttachmentRef>,
2676     pub depth_stencil_attachment: Option<pass::AttachmentRef>,
2677     pub input_attachments: Vec<pass::AttachmentRef>,
2678     pub resolve_attachments: Vec<pass::AttachmentRef>,
2679 }
2680 
2681 impl SubpassDesc {
is_using(&self, at_id: pass::AttachmentId) -> bool2682     pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool {
2683         self.color_attachments
2684             .iter()
2685             .chain(self.depth_stencil_attachment.iter())
2686             .chain(self.input_attachments.iter())
2687             .chain(self.resolve_attachments.iter())
2688             .any(|&(id, _)| id == at_id)
2689     }
2690 }
2691 
2692 #[derive(Clone, Debug)]
2693 pub struct RenderPass {
2694     pub attachments: Vec<pass::Attachment>,
2695     pub subpasses: Vec<SubpassDesc>,
2696 }
2697 
2698 #[derive(Clone, Debug)]
2699 pub struct Framebuffer {
2700     attachments: Vec<ImageView>,
2701     layers: image::Layer,
2702 }
2703 
2704 #[derive(Clone, Debug)]
2705 pub struct InternalBuffer {
2706     raw: *mut d3d11::ID3D11Buffer,
2707     // TODO: need to sync between `raw` and `disjoint_cb`, same way as we do with
2708     // `MemoryFlush/Invalidate`
2709     disjoint_cb: Option<*mut d3d11::ID3D11Buffer>, // if unbound this buffer might be null.
2710     srv: Option<*mut d3d11::ID3D11ShaderResourceView>,
2711     uav: Option<*mut d3d11::ID3D11UnorderedAccessView>,
2712     usage: buffer::Usage,
2713 }
2714 
2715 pub struct Buffer {
2716     internal: InternalBuffer,
2717     ty: MemoryHeapFlags,     // empty if unbound
2718     host_ptr: *mut u8,       // null if unbound
2719     bound_range: Range<u64>, // 0 if unbound
2720     requirements: memory::Requirements,
2721     bind: d3d11::D3D11_BIND_FLAG,
2722 }
2723 
2724 impl fmt::Debug for Buffer {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2725     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2726         fmt.write_str("Buffer")
2727     }
2728 }
2729 
2730 unsafe impl Send for Buffer {}
2731 unsafe impl Sync for Buffer {}
2732 
2733 #[derive(Debug)]
2734 pub struct BufferView;
2735 
2736 pub struct Image {
2737     kind: image::Kind,
2738     usage: image::Usage,
2739     format: format::Format,
2740     view_caps: image::ViewCapabilities,
2741     decomposed_format: conv::DecomposedDxgiFormat,
2742     mip_levels: image::Level,
2743     internal: InternalImage,
2744     tiling: image::Tiling,
2745     bind: d3d11::D3D11_BIND_FLAG,
2746     requirements: memory::Requirements,
2747 }
2748 
2749 impl fmt::Debug for Image {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2750     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2751         fmt.write_str("Image")
2752     }
2753 }
2754 
2755 pub struct InternalImage {
2756     raw: *mut d3d11::ID3D11Resource,
2757     copy_srv: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
2758     srv: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
2759 
2760     /// Contains UAVs for all subresources
2761     unordered_access_views: Vec<ComPtr<d3d11::ID3D11UnorderedAccessView>>,
2762 
2763     /// Contains DSVs for all subresources
2764     depth_stencil_views: Vec<ComPtr<d3d11::ID3D11DepthStencilView>>,
2765 
2766     /// Contains RTVs for all subresources
2767     render_target_views: Vec<ComPtr<d3d11::ID3D11RenderTargetView>>,
2768 }
2769 
2770 impl fmt::Debug for InternalImage {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2771     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2772         fmt.write_str("InternalImage")
2773     }
2774 }
2775 
2776 unsafe impl Send for Image {}
2777 unsafe impl Sync for Image {}
2778 
2779 impl Image {
calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT2780     pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT {
2781         mip_level + (layer * self.mip_levels as UINT)
2782     }
2783 
get_uav( &self, mip_level: image::Level, _layer: image::Layer, ) -> Option<&ComPtr<d3d11::ID3D11UnorderedAccessView>>2784     pub fn get_uav(
2785         &self,
2786         mip_level: image::Level,
2787         _layer: image::Layer,
2788     ) -> Option<&ComPtr<d3d11::ID3D11UnorderedAccessView>> {
2789         self.internal
2790             .unordered_access_views
2791             .get(self.calc_subresource(mip_level as _, 0) as usize)
2792     }
2793 
get_dsv( &self, mip_level: image::Level, layer: image::Layer, ) -> Option<&ComPtr<d3d11::ID3D11DepthStencilView>>2794     pub fn get_dsv(
2795         &self,
2796         mip_level: image::Level,
2797         layer: image::Layer,
2798     ) -> Option<&ComPtr<d3d11::ID3D11DepthStencilView>> {
2799         self.internal
2800             .depth_stencil_views
2801             .get(self.calc_subresource(mip_level as _, layer as _) as usize)
2802     }
2803 
get_rtv( &self, mip_level: image::Level, layer: image::Layer, ) -> Option<&ComPtr<d3d11::ID3D11RenderTargetView>>2804     pub fn get_rtv(
2805         &self,
2806         mip_level: image::Level,
2807         layer: image::Layer,
2808     ) -> Option<&ComPtr<d3d11::ID3D11RenderTargetView>> {
2809         self.internal
2810             .render_target_views
2811             .get(self.calc_subresource(mip_level as _, layer as _) as usize)
2812     }
2813 }
2814 
2815 #[derive(Clone)]
2816 pub struct ImageView {
2817     format: format::Format,
2818     rtv_handle: Option<ComPtr<d3d11::ID3D11RenderTargetView>>,
2819     srv_handle: Option<ComPtr<d3d11::ID3D11ShaderResourceView>>,
2820     dsv_handle: Option<ComPtr<d3d11::ID3D11DepthStencilView>>,
2821     uav_handle: Option<ComPtr<d3d11::ID3D11UnorderedAccessView>>,
2822 }
2823 
2824 impl fmt::Debug for ImageView {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2825     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2826         fmt.write_str("ImageView")
2827     }
2828 }
2829 
2830 unsafe impl Send for ImageView {}
2831 unsafe impl Sync for ImageView {}
2832 
2833 pub struct Sampler {
2834     sampler_handle: ComPtr<d3d11::ID3D11SamplerState>,
2835 }
2836 
2837 impl fmt::Debug for Sampler {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2838     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2839         fmt.write_str("Sampler")
2840     }
2841 }
2842 
2843 unsafe impl Send for Sampler {}
2844 unsafe impl Sync for Sampler {}
2845 
2846 pub struct ComputePipeline {
2847     cs: ComPtr<d3d11::ID3D11ComputeShader>,
2848 }
2849 
2850 impl fmt::Debug for ComputePipeline {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2851     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2852         fmt.write_str("ComputePipeline")
2853     }
2854 }
2855 
2856 unsafe impl Send for ComputePipeline {}
2857 unsafe impl Sync for ComputePipeline {}
2858 
2859 /// NOTE: some objects are hashed internally and reused when created with the
2860 ///       same params[0], need to investigate which interfaces this applies
2861 ///       to.
2862 ///
2863 /// [0]: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476500(v=vs.85).aspx
2864 #[derive(Clone)]
2865 pub struct GraphicsPipeline {
2866     vs: ComPtr<d3d11::ID3D11VertexShader>,
2867     gs: Option<ComPtr<d3d11::ID3D11GeometryShader>>,
2868     hs: Option<ComPtr<d3d11::ID3D11HullShader>>,
2869     ds: Option<ComPtr<d3d11::ID3D11DomainShader>>,
2870     ps: Option<ComPtr<d3d11::ID3D11PixelShader>>,
2871     topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY,
2872     input_layout: ComPtr<d3d11::ID3D11InputLayout>,
2873     rasterizer_state: ComPtr<d3d11::ID3D11RasterizerState>,
2874     blend_state: ComPtr<d3d11::ID3D11BlendState>,
2875     depth_stencil_state: Option<(
2876         ComPtr<d3d11::ID3D11DepthStencilState>,
2877         pso::State<pso::StencilValue>,
2878     )>,
2879     baked_states: pso::BakedStates,
2880     required_bindings: u32,
2881     max_vertex_bindings: u32,
2882     strides: Vec<u32>,
2883 }
2884 
2885 impl fmt::Debug for GraphicsPipeline {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result2886     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
2887         fmt.write_str("GraphicsPipeline")
2888     }
2889 }
2890 
2891 unsafe impl Send for GraphicsPipeline {}
2892 unsafe impl Sync for GraphicsPipeline {}
2893 
2894 #[derive(Clone, Debug)]
2895 struct PipelineBinding {
2896     stage: pso::ShaderStageFlags,
2897     ty: pso::DescriptorType,
2898     binding_range: Range<u32>,
2899     handle_offset: u32,
2900 }
2901 
2902 #[derive(Clone, Debug)]
2903 struct RegisterMapping {
2904     ty: pso::DescriptorType,
2905     spirv_binding: u32,
2906     hlsl_register: u8,
2907     combined: bool,
2908 }
2909 
2910 #[derive(Clone, Debug)]
2911 struct RegisterRemapping {
2912     mapping: Vec<RegisterMapping>,
2913     num_t: u8,
2914     num_s: u8,
2915     num_c: u8,
2916     num_u: u8,
2917 }
2918 
2919 /// The pipeline layout holds optimized (less api calls) ranges of objects for all descriptor sets
2920 /// belonging to the pipeline object.
2921 #[derive(Debug)]
2922 pub struct PipelineLayout {
2923     set_bindings: Vec<Vec<PipelineBinding>>,
2924     set_remapping: Vec<RegisterRemapping>,
2925 }
2926 
2927 /// The descriptor set layout contains mappings from a given binding to the offset in our
2928 /// descriptor pool storage and what type of descriptor it is (combined image sampler takes up two
2929 /// handles).
2930 #[derive(Debug)]
2931 pub struct DescriptorSetLayout {
2932     bindings: Vec<PipelineBinding>,
2933     handle_count: u32,
2934     register_remap: RegisterRemapping,
2935 }
2936 
2937 #[derive(Debug)]
2938 struct CoherentBufferFlushRange {
2939     device_buffer: *mut d3d11::ID3D11Buffer,
2940     host_ptr: *mut u8,
2941     range: SyncRange,
2942 }
2943 
2944 #[derive(Debug)]
2945 struct CoherentBufferInvalidateRange {
2946     device_buffer: *mut d3d11::ID3D11Buffer,
2947     host_ptr: *mut u8,
2948     range: Range<u64>,
2949 }
2950 
2951 #[derive(Debug)]
2952 struct CoherentBuffers {
2953     // descriptor set writes containing coherent resources go into these vecs and are added to the
2954     // command buffers own Vec on binding the set.
2955     flush_coherent_buffers: RefCell<Vec<CoherentBufferFlushRange>>,
2956     invalidate_coherent_buffers: RefCell<Vec<CoherentBufferInvalidateRange>>,
2957 }
2958 
2959 impl CoherentBuffers {
add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer)2960     fn add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
2961         let new = buffer.internal.raw;
2962 
2963         if old != new {
2964             let mut buffers = self.flush_coherent_buffers.borrow_mut();
2965 
2966             let pos = buffers.iter().position(|sync| old == sync.device_buffer);
2967 
2968             let sync_range = CoherentBufferFlushRange {
2969                 device_buffer: new,
2970                 host_ptr: buffer.host_ptr,
2971                 range: SyncRange::Whole,
2972             };
2973 
2974             if let Some(pos) = pos {
2975                 buffers[pos] = sync_range;
2976             } else {
2977                 buffers.push(sync_range);
2978             }
2979 
2980             if let Some(disjoint) = buffer.internal.disjoint_cb {
2981                 let pos = buffers
2982                     .iter()
2983                     .position(|sync| disjoint == sync.device_buffer);
2984 
2985                 let sync_range = CoherentBufferFlushRange {
2986                     device_buffer: disjoint,
2987                     host_ptr: buffer.host_ptr,
2988                     range: SyncRange::Whole,
2989                 };
2990 
2991                 if let Some(pos) = pos {
2992                     buffers[pos] = sync_range;
2993                 } else {
2994                     buffers.push(sync_range);
2995                 }
2996             }
2997         }
2998     }
2999 
add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer)3000     fn add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
3001         let new = buffer.internal.raw;
3002 
3003         if old != new {
3004             let mut buffers = self.invalidate_coherent_buffers.borrow_mut();
3005 
3006             let pos = buffers.iter().position(|sync| old == sync.device_buffer);
3007 
3008             let sync_range = CoherentBufferInvalidateRange {
3009                 device_buffer: new,
3010                 host_ptr: buffer.host_ptr,
3011                 range: buffer.bound_range.clone(),
3012             };
3013 
3014             if let Some(pos) = pos {
3015                 buffers[pos] = sync_range;
3016             } else {
3017                 buffers.push(sync_range);
3018             }
3019         }
3020     }
3021 }
3022 
3023 /// Newtype around a common interface that all bindable resources inherit from.
3024 #[derive(Debug, Copy, Clone)]
3025 #[repr(C)]
3026 struct Descriptor(*mut d3d11::ID3D11DeviceChild);
3027 
3028 pub struct DescriptorSet {
3029     offset: usize,
3030     len: usize,
3031     handles: *mut Descriptor,
3032     register_remap: RegisterRemapping,
3033     coherent_buffers: Mutex<CoherentBuffers>,
3034 }
3035 
3036 impl fmt::Debug for DescriptorSet {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result3037     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
3038         fmt.write_str("DescriptorSet")
3039     }
3040 }
3041 
3042 unsafe impl Send for DescriptorSet {}
3043 unsafe impl Sync for DescriptorSet {}
3044 
3045 impl DescriptorSet {
get_handle_offset(&self, target_binding: u32) -> (pso::DescriptorType, u8, u8)3046     fn get_handle_offset(&self, target_binding: u32) -> (pso::DescriptorType, u8, u8) {
3047         use pso::DescriptorType::*;
3048 
3049         let mapping = self
3050             .register_remap
3051             .mapping
3052             .iter()
3053             .find(|&mapping| target_binding == mapping.spirv_binding)
3054             .unwrap();
3055 
3056         let (ty, register) = (mapping.ty, mapping.hlsl_register);
3057 
3058         match ty {
3059             Sampler => {
3060                 let (ty, t_reg) = if mapping.combined {
3061                     let combined_mapping = self
3062                         .register_remap
3063                         .mapping
3064                         .iter()
3065                         .find(|&mapping| {
3066                             mapping.ty == SampledImage && target_binding == mapping.spirv_binding
3067                         })
3068                         .unwrap();
3069                     (CombinedImageSampler, combined_mapping.hlsl_register)
3070                 } else {
3071                     (ty, 0)
3072                 };
3073 
3074                 (ty, register, self.register_remap.num_s + t_reg)
3075             }
3076             SampledImage | UniformTexelBuffer => (ty, self.register_remap.num_s + register, 0),
3077             UniformBuffer | UniformBufferDynamic => (
3078                 ty,
3079                 self.register_remap.num_s + self.register_remap.num_t + register,
3080                 0,
3081             ),
3082             StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic
3083             | StorageImage => (
3084                 ty,
3085                 self.register_remap.num_s
3086                     + self.register_remap.num_t
3087                     + self.register_remap.num_c
3088                     + register,
3089                 0,
3090             ),
3091             CombinedImageSampler => unreachable!(),
3092         }
3093     }
3094 
add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer)3095     fn add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
3096         let new = buffer.internal.raw;
3097 
3098         if old != new {
3099             self.coherent_buffers.lock().add_flush(old, buffer);
3100         }
3101     }
3102 
add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer)3103     fn add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) {
3104         let new = buffer.internal.raw;
3105 
3106         if old != new {
3107             self.coherent_buffers.lock().add_invalidate(old, buffer);
3108         }
3109     }
3110 }
3111 
3112 #[derive(Debug)]
3113 pub struct DescriptorPool {
3114     handles: Vec<Descriptor>,
3115     allocator: RangeAllocator<usize>,
3116 }
3117 
3118 unsafe impl Send for DescriptorPool {}
3119 unsafe impl Sync for DescriptorPool {}
3120 
3121 impl DescriptorPool {
with_capacity(size: usize) -> Self3122     pub fn with_capacity(size: usize) -> Self {
3123         DescriptorPool {
3124             handles: vec![Descriptor(ptr::null_mut()); size],
3125             allocator: RangeAllocator::new(0 .. size),
3126         }
3127     }
3128 }
3129 
3130 impl pso::DescriptorPool<Backend> for DescriptorPool {
allocate_set( &mut self, layout: &DescriptorSetLayout, ) -> Result<DescriptorSet, pso::AllocationError>3131     unsafe fn allocate_set(
3132         &mut self,
3133         layout: &DescriptorSetLayout,
3134     ) -> Result<DescriptorSet, pso::AllocationError> {
3135         // TODO: make sure this doesn't contradict vulkan semantics
3136         // if layout has 0 bindings, allocate 1 handle anyway
3137         let len = layout.handle_count.max(1) as _;
3138 
3139         self.allocator
3140             .allocate_range(len)
3141             .map(|range| {
3142                 for handle in &mut self.handles[range.clone()] {
3143                     *handle = Descriptor(ptr::null_mut());
3144                 }
3145 
3146                 DescriptorSet {
3147                     offset: range.start,
3148                     len,
3149                     handles: self.handles.as_mut_ptr().offset(range.start as _),
3150                     register_remap: layout.register_remap.clone(),
3151                     coherent_buffers: Mutex::new(CoherentBuffers {
3152                         flush_coherent_buffers: RefCell::new(Vec::new()),
3153                         invalidate_coherent_buffers: RefCell::new(Vec::new()),
3154                     }),
3155                 }
3156             })
3157             .map_err(|_| pso::AllocationError::OutOfPoolMemory)
3158     }
3159 
free_sets<I>(&mut self, descriptor_sets: I) where I: IntoIterator<Item = DescriptorSet>,3160     unsafe fn free_sets<I>(&mut self, descriptor_sets: I)
3161     where
3162         I: IntoIterator<Item = DescriptorSet>,
3163     {
3164         for set in descriptor_sets {
3165             self.allocator
3166                 .free_range(set.offset .. (set.offset + set.len))
3167         }
3168     }
3169 
reset(&mut self)3170     unsafe fn reset(&mut self) {
3171         self.allocator.reset();
3172     }
3173 }
3174 
3175 #[derive(Debug)]
3176 pub struct RawFence {
3177     mutex: Mutex<bool>,
3178     condvar: Condvar,
3179 }
3180 
3181 pub type Fence = Arc<RawFence>;
3182 
3183 #[derive(Debug)]
3184 pub struct Semaphore;
3185 #[derive(Debug)]
3186 pub struct QueryPool;
3187 
3188 #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
3189 pub enum Backend {}
3190 impl hal::Backend for Backend {
3191     type Instance = Instance;
3192     type PhysicalDevice = PhysicalDevice;
3193     type Device = device::Device;
3194 
3195     type Surface = Surface;
3196     type Swapchain = Swapchain;
3197 
3198     type QueueFamily = QueueFamily;
3199     type CommandQueue = CommandQueue;
3200     type CommandBuffer = CommandBuffer;
3201 
3202     type Memory = Memory;
3203     type CommandPool = CommandPool;
3204 
3205     type ShaderModule = ShaderModule;
3206     type RenderPass = RenderPass;
3207     type Framebuffer = Framebuffer;
3208 
3209     type Buffer = Buffer;
3210     type BufferView = BufferView;
3211     type Image = Image;
3212 
3213     type ImageView = ImageView;
3214     type Sampler = Sampler;
3215 
3216     type ComputePipeline = ComputePipeline;
3217     type GraphicsPipeline = GraphicsPipeline;
3218     type PipelineLayout = PipelineLayout;
3219     type PipelineCache = ();
3220     type DescriptorSetLayout = DescriptorSetLayout;
3221     type DescriptorPool = DescriptorPool;
3222     type DescriptorSet = DescriptorSet;
3223 
3224     type Fence = Fence;
3225     type Semaphore = Semaphore;
3226     type Event = ();
3227     type QueryPool = QueryPool;
3228 }
3229 
validate_line_width(width: f32)3230 fn validate_line_width(width: f32) {
3231     // Note from the Vulkan spec:
3232     // > If the wide lines feature is not enabled, lineWidth must be 1.0
3233     // Simply assert and no-op because DX11 never exposes `Features::LINE_WIDTH`
3234     assert_eq!(width, 1.0);
3235 }
3236