1 /* This Source Code Form is subject to the terms of the Mozilla Public
2  * License, v. 2.0. If a copy of the MPL was not distributed with this
3  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4 
5 use crate::{
6     binding_model, command, conv,
7     device::life::WaitIdleError,
8     hub::{
9         GfxBackend, Global, GlobalIdentityHandlerFactory, Hub, Input, InvalidId, Storage, Token,
10     },
11     id, instance,
12     memory_init_tracker::{MemoryInitKind, MemoryInitTracker, MemoryInitTrackerAction},
13     pipeline, resource, swap_chain,
14     track::{BufferState, TextureSelector, TextureState, TrackerSet, UsageConflict},
15     validation::{self, check_buffer_usage, check_texture_usage},
16     FastHashMap, Label, LabelHelpers, LifeGuard, MultiRefCount, PrivateFeatures, Stored,
17     SubmissionIndex, MAX_BIND_GROUPS,
18 };
19 
20 use arrayvec::ArrayVec;
21 use copyless::VecHelper as _;
22 use hal::{
23     command::CommandBuffer as _,
24     device::Device as _,
25     window::{PresentationSurface as _, Surface as _},
26 };
27 use parking_lot::{Mutex, MutexGuard};
28 use thiserror::Error;
29 use wgt::{BufferAddress, InputStepMode, TextureDimension, TextureFormat, TextureViewDimension};
30 
31 use std::{
32     borrow::Cow,
33     collections::{hash_map::Entry, BTreeMap},
34     iter,
35     marker::PhantomData,
36     mem,
37     ops::Range,
38     ptr,
39     sync::atomic::Ordering,
40 };
41 
42 pub mod alloc;
43 pub mod descriptor;
44 mod life;
45 pub mod queue;
46 #[cfg(any(feature = "trace", feature = "replay"))]
47 pub mod trace;
48 
49 use smallvec::SmallVec;
50 
/// Maximum number of color attachments in a render pass.
pub const MAX_COLOR_TARGETS: usize = 4;
/// Maximum number of mip levels a texture may have.
pub const MAX_MIP_LEVELS: u32 = 16;
/// Maximum number of vertex buffers that can be bound at once.
pub const MAX_VERTEX_BUFFERS: usize = 16;
/// Maximum anisotropic filtering level accepted in sampler descriptors.
pub const MAX_ANISOTROPY: u8 = 16;
// Number of shader stages tracked per pipeline — presumably
// vertex/fragment/compute; confirm against per-stage arrays elsewhere.
pub const SHADER_STAGE_COUNT: usize = 3;

// Placeholder label reported when an implicitly-created object fails
// (exact usage is outside this chunk — see callers).
const IMPLICIT_FAILURE: &str = "failed implicit";

/// Device descriptor specialized with this crate's label type.
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
60 
all_buffer_stages() -> hal::pso::PipelineStage61 pub fn all_buffer_stages() -> hal::pso::PipelineStage {
62     use hal::pso::PipelineStage as Ps;
63     Ps::DRAW_INDIRECT
64         | Ps::VERTEX_INPUT
65         | Ps::VERTEX_SHADER
66         | Ps::FRAGMENT_SHADER
67         | Ps::COMPUTE_SHADER
68         | Ps::TRANSFER
69         | Ps::HOST
70 }
all_image_stages() -> hal::pso::PipelineStage71 pub fn all_image_stages() -> hal::pso::PipelineStage {
72     use hal::pso::PipelineStage as Ps;
73     Ps::EARLY_FRAGMENT_TESTS
74         | Ps::LATE_FRAGMENT_TESTS
75         | Ps::COLOR_ATTACHMENT_OUTPUT
76         | Ps::VERTEX_SHADER
77         | Ps::FRAGMENT_SHADER
78         | Ps::COMPUTE_SHADER
79         | Ps::TRANSFER
80 }
81 
/// Direction of host (CPU) access for a buffer mapping.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum HostMap {
    /// Map for reading data back from the GPU.
    Read,
    /// Map for writing data to be uploaded to the GPU.
    Write,
}
90 
/// Per-attachment data of a render pass, generic over the payload carried
/// for each attachment (formats, layouts, raw attachment handles, ...).
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct AttachmentData<T> {
    // Color attachments (at most MAX_COLOR_TARGETS).
    pub colors: ArrayVec<[T; MAX_COLOR_TARGETS]>,
    // Resolve targets paired with multisampled color attachments.
    pub resolves: ArrayVec<[T; MAX_COLOR_TARGETS]>,
    // Optional depth/stencil attachment.
    pub depth_stencil: Option<T>,
}
// Manual `Eq` so only `T: PartialEq` is required of the payload type.
impl<T: PartialEq> Eq for AttachmentData<T> {}
impl<T> AttachmentData<T> {
    /// Iterates over all attachments in order: colors, resolves, then
    /// the depth/stencil attachment (if any).
    pub(crate) fn all(&self) -> impl Iterator<Item = &T> {
        self.colors
            .iter()
            .chain(&self.resolves)
            .chain(&self.depth_stencil)
    }

    /// Applies `fun` to each attachment, producing an `AttachmentData<U>`
    /// with the same structure.
    pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
        AttachmentData {
            colors: self.colors.iter().map(&fun).collect(),
            resolves: self.resolves.iter().map(&fun).collect(),
            depth_stencil: self.depth_stencil.as_ref().map(&fun),
        }
    }
}
115 
/// Vector sized to hold every possible attachment:
/// colors + resolves + one depth/stencil.
pub(crate) type AttachmentDataVec<T> = ArrayVec<[T; MAX_COLOR_TARGETS + MAX_COLOR_TARGETS + 1]>;
/// Cache key for backend render passes: attachment descriptions plus layouts.
pub(crate) type RenderPassKey = AttachmentData<(hal::pass::Attachment, hal::image::Layout)>;
/// Cache key for backend framebuffers.
#[derive(Debug, Eq, Hash, PartialEq)]
pub(crate) struct FramebufferKey {
    pub(crate) attachments: AttachmentData<hal::image::FramebufferAttachment>,
    pub(crate) extent: wgt::Extent3d,
    pub(crate) samples: hal::image::NumSamples,
}
124 
/// Format/sample-count "shape" of a render pass, used to check that a
/// pipeline or bundle is compatible with the pass it is used in.
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct RenderPassContext {
    // Texture formats of all attachments.
    pub attachments: AttachmentData<TextureFormat>,
    // Multisample count shared by all attachments.
    pub sample_count: u8,
}
/// Reasons two [`RenderPassContext`]s can fail to be compatible.
#[derive(Clone, Debug, Error)]
pub enum RenderPassCompatibilityError {
    #[error("Incompatible color attachment: {0:?} != {1:?}")]
    IncompatibleColorAttachment(
        ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
        ArrayVec<[TextureFormat; MAX_COLOR_TARGETS]>,
    ),
    #[error("Incompatible depth-stencil attachment: {0:?} != {1:?}")]
    IncompatibleDepthStencilAttachment(Option<TextureFormat>, Option<TextureFormat>),
    #[error("Incompatible sample count: {0:?} != {1:?}")]
    IncompatibleSampleCount(u8, u8),
}
143 
144 impl RenderPassContext {
145     // Assumed the renderpass only contains one subpass
check_compatible( &self, other: &RenderPassContext, ) -> Result<(), RenderPassCompatibilityError>146     pub(crate) fn check_compatible(
147         &self,
148         other: &RenderPassContext,
149     ) -> Result<(), RenderPassCompatibilityError> {
150         if self.attachments.colors != other.attachments.colors {
151             return Err(RenderPassCompatibilityError::IncompatibleColorAttachment(
152                 self.attachments.colors.clone(),
153                 other.attachments.colors.clone(),
154             ));
155         }
156         if self.attachments.depth_stencil != other.attachments.depth_stencil {
157             return Err(
158                 RenderPassCompatibilityError::IncompatibleDepthStencilAttachment(
159                     self.attachments.depth_stencil,
160                     other.attachments.depth_stencil,
161                 ),
162             );
163         }
164         if self.sample_count != other.sample_count {
165             return Err(RenderPassCompatibilityError::IncompatibleSampleCount(
166                 self.sample_count,
167                 other.sample_count,
168             ));
169         }
170         Ok(())
171     }
172 }
173 
/// A user buffer-map callback paired with the status to report when fired.
type BufferMapPendingCallback = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
175 
/// Maps `size` bytes of `buffer`'s backing memory at `offset` for host
/// access and returns a pointer to the mapped range.
///
/// On non-coherent memory, a read mapping is invalidated immediately and a
/// write mapping records a pending flush segment in
/// `buffer.sync_mapped_writes` (flushed later by `unmap_buffer`).
/// Uninitialized portions of the mapped range are zero-filled here.
fn map_buffer<B: hal::Backend>(
    raw: &B::Device,
    buffer: &mut resource::Buffer<B>,
    offset: hal::buffer::Offset,
    size: BufferAddress,
    kind: HostMap,
) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
    // A destroyed buffer has no backing (raw, memory-block) pair left.
    let &mut (_, ref mut block) = buffer
        .raw
        .as_mut()
        .ok_or(resource::BufferAccessError::Destroyed)?;
    let ptr = block.map(raw, offset, size).map_err(DeviceError::from)?;

    // Non-coherent memory needs explicit synchronization:
    // reads get an invalidate now, writes get a flush deferred to unmap.
    buffer.sync_mapped_writes = match kind {
        HostMap::Read if !block.is_coherent() => {
            block.invalidate_range(raw, offset, Some(size))?;
            None
        }
        HostMap::Write if !block.is_coherent() => Some(hal::memory::Segment {
            offset,
            size: Some(size),
        }),
        _ => None,
    };

    // Zero out uninitialized parts of the mapping. (Spec dictates all resources behave as if they were initialized with zero)
    //
    // If this is a read mapping, ideally we would use a `fill_buffer` command before reading the data from GPU (i.e. `invalidate_range`).
    // However, this would require us to kick off and wait for a command buffer or piggy back on an existing one (the later is likely the only worthwhile option).
    // As reading uninitialized memory isn't a particular important path to support,
    // we instead just initialize the memory here and make sure it is GPU visible, so this happens at max only once for every buffer region.
    //
    // If this is a write mapping zeroing out the memory here is the only reasonable way as all data is pushed to GPU anyways.
    let zero_init_needs_flush_now = !block.is_coherent() && buffer.sync_mapped_writes.is_none(); // No need to flush if it is flushed later anyways.
    for uninitialized_range in buffer.initialization_status.drain(offset..(size + offset)) {
        let num_bytes = uninitialized_range.end - uninitialized_range.start;
        // Zero-fill the uninitialized sub-range of the mapping.
        unsafe {
            ptr::write_bytes(
                ptr.as_ptr().offset(uninitialized_range.start as isize),
                0,
                num_bytes as usize,
            )
        };
        if zero_init_needs_flush_now {
            block.flush_range(raw, uninitialized_range.start, Some(num_bytes))?;
        }
    }

    Ok(ptr)
}
226 
unmap_buffer<B: hal::Backend>( raw: &B::Device, buffer: &mut resource::Buffer<B>, ) -> Result<(), resource::BufferAccessError>227 fn unmap_buffer<B: hal::Backend>(
228     raw: &B::Device,
229     buffer: &mut resource::Buffer<B>,
230 ) -> Result<(), resource::BufferAccessError> {
231     let &mut (_, ref mut block) = buffer
232         .raw
233         .as_mut()
234         .ok_or(resource::BufferAccessError::Destroyed)?;
235     if let Some(segment) = buffer.sync_mapped_writes.take() {
236         block.flush_range(raw, segment.offset, segment.size)?;
237     }
238     block.unmap(raw);
239     Ok(())
240 }
241 
242 //Note: this logic is specifically moved out of `handle_mapping()` in order to
243 // have nothing locked by the time we execute users callback code.
fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I)244 fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
245     for (operation, status) in callbacks {
246         unsafe { (operation.callback)(status, operation.user_data) }
247     }
248 }
249 
/// Mutex-guarded caches of backend render passes and framebuffers,
/// keyed by their respective cache keys.
#[derive(Debug)]
pub(crate) struct RenderPassLock<B: hal::Backend> {
    pub(crate) render_passes: FastHashMap<RenderPassKey, B::RenderPass>,
    pub(crate) framebuffers: FastHashMap<FramebufferKey, B::Framebuffer>,
}
255 
/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.
/// TODO: establish clear order of locking for these:
/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`,
/// `render_passes`, `pending_writes`, `trace`.
///
/// Currently, the rules are:
/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
/// 1. `self.trackers` is locked last (unenforced)
/// 1. `self.trace` is locked last (unenforced)
#[derive(Debug)]
pub struct Device<B: hal::Backend> {
    pub(crate) raw: B::Device,
    pub(crate) adapter_id: Stored<id::AdapterId>,
    pub(crate) queue_group: hal::queue::QueueGroup<B>,
    pub(crate) cmd_allocator: command::CommandAllocator<B>,
    mem_allocator: Mutex<alloc::MemoryAllocator<B>>,
    desc_allocator: Mutex<descriptor::DescriptorAllocator<B>>,
    //Note: The submission index here corresponds to the last submission that is done.
    pub(crate) life_guard: LifeGuard,
    pub(crate) active_submission_index: SubmissionIndex,
    /// Has to be locked temporarily only (locked last)
    pub(crate) trackers: Mutex<TrackerSet>,
    // Caches of backend render passes and framebuffers.
    pub(crate) render_passes: Mutex<RenderPassLock<B>>,
    // Life tracker should be locked right after the device and before anything else.
    life_tracker: Mutex<life::LifetimeTracker<B>>,
    // Scratch list reused by `untrack` to collect suspected resources.
    temp_suspected: life::SuspectedResources,
    pub(crate) hal_limits: hal::Limits,
    pub(crate) private_features: PrivateFeatures,
    pub(crate) limits: wgt::Limits,
    pub(crate) features: wgt::Features,
    pub(crate) downlevel: wgt::DownlevelProperties,
    // Options used when emitting SPIR-V from naga shader modules.
    spv_options: naga::back::spv::Options,
    //TODO: move this behind another mutex. This would allow several methods to switch
    // to borrow Device immutably, such as `write_buffer`, `write_texture`, and `buffer_unmap`.
    pending_writes: queue::PendingWrites<B>,
    #[cfg(feature = "trace")]
    pub(crate) trace: Option<Mutex<trace::Trace>>,
}
295 
/// Errors that can occur while creating a [`Device`].
#[derive(Clone, Debug, Error)]
pub enum CreateDeviceError {
    #[error("not enough memory left")]
    OutOfMemory,
}
301 
302 impl<B: GfxBackend> Device<B> {
303     #[allow(clippy::too_many_arguments)]
new( raw: B::Device, adapter_id: Stored<id::AdapterId>, queue_group: hal::queue::QueueGroup<B>, mem_props: hal::adapter::MemoryProperties, hal_limits: hal::Limits, private_features: PrivateFeatures, downlevel: wgt::DownlevelProperties, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, ) -> Result<Self, CreateDeviceError>304     pub(crate) fn new(
305         raw: B::Device,
306         adapter_id: Stored<id::AdapterId>,
307         queue_group: hal::queue::QueueGroup<B>,
308         mem_props: hal::adapter::MemoryProperties,
309         hal_limits: hal::Limits,
310         private_features: PrivateFeatures,
311         downlevel: wgt::DownlevelProperties,
312         desc: &DeviceDescriptor,
313         trace_path: Option<&std::path::Path>,
314     ) -> Result<Self, CreateDeviceError> {
315         let cmd_allocator = command::CommandAllocator::new(queue_group.family, &raw)
316             .or(Err(CreateDeviceError::OutOfMemory))?;
317 
318         let mem_allocator = alloc::MemoryAllocator::new(mem_props, hal_limits);
319         let descriptors = descriptor::DescriptorAllocator::new();
320         #[cfg(not(feature = "trace"))]
321         if let Some(_) = trace_path {
322             log::error!("Feature 'trace' is not enabled");
323         }
324 
325         let spv_options = {
326             use naga::back::spv;
327             let mut flags = spv::WriterFlags::empty();
328             flags.set(spv::WriterFlags::DEBUG, cfg!(debug_assertions));
329             //Note: we don't adjust the coordinate space, because `NDC_Y_UP` is required.
330             spv::Options {
331                 lang_version: (1, 0),
332                 //TODO: can be `None` once `spirv` is published
333                 capabilities: Some(
334                     [
335                         spv::Capability::Shader,
336                         spv::Capability::DerivativeControl,
337                         spv::Capability::InterpolationFunction,
338                         spv::Capability::Matrix,
339                         spv::Capability::ImageQuery,
340                         spv::Capability::Sampled1D,
341                         spv::Capability::Image1D,
342                         spv::Capability::SampledCubeArray,
343                         spv::Capability::ImageCubeArray,
344                         spv::Capability::StorageImageExtendedFormats,
345                     ]
346                     .iter()
347                     .cloned()
348                     .collect(),
349                 ),
350                 flags,
351             }
352         };
353 
354         Ok(Self {
355             raw,
356             adapter_id,
357             cmd_allocator,
358             mem_allocator: Mutex::new(mem_allocator),
359             desc_allocator: Mutex::new(descriptors),
360             queue_group,
361             life_guard: LifeGuard::new("<device>"),
362             active_submission_index: 0,
363             trackers: Mutex::new(TrackerSet::new(B::VARIANT)),
364             render_passes: Mutex::new(RenderPassLock {
365                 render_passes: FastHashMap::default(),
366                 framebuffers: FastHashMap::default(),
367             }),
368             life_tracker: Mutex::new(life::LifetimeTracker::new()),
369             temp_suspected: life::SuspectedResources::default(),
370             #[cfg(feature = "trace")]
371             trace: trace_path.and_then(|path| match trace::Trace::new(path) {
372                 Ok(mut trace) => {
373                     trace.add(trace::Action::Init {
374                         desc: desc.clone(),
375                         backend: B::VARIANT,
376                     });
377                     Some(Mutex::new(trace))
378                 }
379                 Err(e) => {
380                     log::error!("Unable to start a trace in '{:?}': {:?}", path, e);
381                     None
382                 }
383             }),
384             hal_limits,
385             private_features,
386             limits: desc.limits.clone(),
387             features: desc.features,
388             downlevel,
389             spv_options,
390             pending_writes: queue::PendingWrites::new(),
391         })
392     }
393 
require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures>394     pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
395         if self.features.contains(feature) {
396             Ok(())
397         } else {
398             Err(MissingFeatures(feature))
399         }
400     }
401 
    /// Index of the last submission known to have completed on the GPU,
    /// loaded from the device's life guard.
    pub(crate) fn last_completed_submission_index(&self) -> SubmissionIndex {
        self.life_guard.submission_index.load(Ordering::Acquire)
    }
405 
    /// Locks the given lifetime tracker. Taking a `Token<Self>` proves the
    /// device itself is already locked, enforcing the documented lock order.
    fn lock_life_internal<'this, 'token: 'this>(
        tracker: &'this Mutex<life::LifetimeTracker<B>>,
        _token: &mut Token<'token, Self>,
    ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
        tracker.lock()
    }
412 
    /// Convenience wrapper locking this device's own lifetime tracker.
    fn lock_life<'this, 'token: 'this>(
        &'this self,
        //TODO: fix this - the token has to be borrowed for the lock
        token: &mut Token<'token, Self>,
    ) -> MutexGuard<'this, life::LifetimeTracker<B>> {
        Self::lock_life_internal(&self.life_tracker, token)
    }
420 
    /// Performs one device maintenance pass: triages suspected and mapped
    /// resources, checks submission completion (blocking if `force_wait`),
    /// collects buffer-map callbacks, and frees retired resources.
    ///
    /// Returns the callbacks to fire *after* all locks are released
    /// (see `fire_map_callbacks`).
    fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
        &'this self,
        hub: &Hub<B, G>,
        force_wait: bool,
        token: &mut Token<'token, Self>,
    ) -> Result<Vec<BufferMapPendingCallback>, WaitIdleError> {
        profiling::scope!("maintain", "Device");
        let mut life_tracker = self.lock_life(token);

        life_tracker.triage_suspected(
            hub,
            &self.trackers,
            #[cfg(feature = "trace")]
            self.trace.as_ref(),
            token,
        );
        life_tracker.triage_mapped(hub, token);
        // `last_done` is the highest submission index confirmed complete.
        let last_done = life_tracker.triage_submissions(&self.raw, force_wait)?;
        let callbacks = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
        life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);

        // Publish completion progress and recycle command pools up to it.
        self.life_guard
            .submission_index
            .store(last_done, Ordering::Release);
        self.cmd_allocator.maintain(&self.raw, last_done);
        Ok(callbacks)
    }
448 
    /// Queues for destruction every resource referenced by `trackers` whose
    /// ref-count has already dropped. Called when a tracker set is being
    /// cleared/dropped so its references are reconsidered by the next GC pass.
    fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
        &'this mut self,
        hub: &Hub<B, G>,
        trackers: &TrackerSet,
        mut token: &mut Token<'token, Self>,
    ) {
        self.temp_suspected.clear();
        // As the tracker is cleared/dropped, we need to consider all the resources
        // that it references for destruction in the next GC pass.
        {
            // Read-lock every registry in hub order; each `read` threads a
            // fresh token to enforce the global locking order.
            let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
            let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
            let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
            let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
            let (buffer_guard, mut token) = hub.buffers.read(&mut token);
            let (texture_guard, mut token) = hub.textures.read(&mut token);
            let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
            let (sampler_guard, _) = hub.samplers.read(&mut token);

            // A resource with no ref-count left is only kept alive by the
            // tracker being dropped, so suspect it.
            for id in trackers.buffers.used() {
                if buffer_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.buffers.push(id);
                }
            }
            for id in trackers.textures.used() {
                if texture_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.textures.push(id);
                }
            }
            for id in trackers.views.used() {
                if texture_view_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.texture_views.push(id);
                }
            }
            for id in trackers.bind_groups.used() {
                if bind_group_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.bind_groups.push(id);
                }
            }
            for id in trackers.samplers.used() {
                if sampler_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.samplers.push(id);
                }
            }
            for id in trackers.compute_pipes.used() {
                if compute_pipe_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.compute_pipelines.push(id);
                }
            }
            for id in trackers.render_pipes.used() {
                if render_pipe_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.render_pipelines.push(id);
                }
            }
            for id in trackers.query_sets.used() {
                if query_set_guard[id].life_guard.ref_count.is_none() {
                    self.temp_suspected.query_sets.push(id);
                }
            }
        }

        // Hand the collected suspects to the lifetime tracker.
        self.lock_life(&mut token)
            .suspected_resources
            .extend(&self.temp_suspected);
    }
514 
    /// Creates a GPU buffer on this device, allocating and binding its
    /// backing memory.
    ///
    /// `transient` requests allocation from transient memory. Usage flags
    /// are adjusted so the buffer can be zero-initialized on demand
    /// (transfer-dst), and memory usage flags are derived from the
    /// map/copy usage combination.
    ///
    /// # Errors
    /// Fails on empty usage, unaligned `mapped_at_creation` size, mappable
    /// usage without `MAPPABLE_PRIMARY_BUFFERS`, or allocation failure.
    fn create_buffer(
        &self,
        self_id: id::DeviceId,
        desc: &resource::BufferDescriptor,
        transient: bool,
    ) -> Result<resource::Buffer<B>, resource::CreateBufferError> {
        debug_assert_eq!(self_id.backend(), B::VARIANT);
        let (mut usage, _memory_properties) = conv::map_buffer_usage(desc.usage);
        if desc.mapped_at_creation {
            if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(resource::CreateBufferError::UnalignedSize);
            }
            if !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
                // we are going to be copying into it, internally
                usage |= hal::buffer::Usage::TRANSFER_DST;
            }
        } else {
            // We are required to zero out (initialize) all memory.
            // This is done on demand using fill_buffer which requires write transfer usage!
            usage |= hal::buffer::Usage::TRANSFER_DST;
        }

        if desc.usage.is_empty() {
            return Err(resource::CreateBufferError::EmptyUsage);
        }

        // Derive gpu-alloc usage flags from the requested buffer usage.
        let mem_usage = {
            use gpu_alloc::UsageFlags as Uf;
            use wgt::BufferUsage as Bu;

            let mut flags = Uf::empty();
            let map_flags = desc.usage & (Bu::MAP_READ | Bu::MAP_WRITE);
            let map_copy_flags =
                desc.usage & (Bu::MAP_READ | Bu::MAP_WRITE | Bu::COPY_SRC | Bu::COPY_DST);
            // Prefer device-local memory unless the buffer is used purely
            // for mapping/copying.
            if map_flags.is_empty() || !(desc.usage - map_copy_flags).is_empty() {
                flags |= Uf::FAST_DEVICE_ACCESS;
            }
            if transient {
                flags |= Uf::TRANSIENT;
            }

            if !map_flags.is_empty() {
                let upload_usage = Bu::MAP_WRITE | Bu::COPY_SRC;
                let download_usage = Bu::MAP_READ | Bu::COPY_DST;

                flags |= Uf::HOST_ACCESS;
                if desc.usage.contains(upload_usage) {
                    flags |= Uf::UPLOAD;
                }
                if desc.usage.contains(download_usage) {
                    flags |= Uf::DOWNLOAD;
                }

                // Without MAPPABLE_PRIMARY_BUFFERS, mappable buffers must be
                // pure upload or pure download staging buffers.
                let is_native_only = self
                    .features
                    .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS);
                if !is_native_only
                    && !upload_usage.contains(desc.usage)
                    && !download_usage.contains(desc.usage)
                {
                    return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
                }
            }

            flags
        };

        // Zero-sized buffers are not allowed by the backend; clamp to 1 byte.
        let mut buffer = unsafe {
            self.raw
                .create_buffer(desc.size.max(1), usage, hal::memory::SparseFlags::empty())
        }
        .map_err(|err| match err {
            hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
            _ => panic!("failed to create buffer: {}", err),
        })?;
        if let Some(ref label) = desc.label {
            unsafe { self.raw.set_buffer_name(&mut buffer, label) };
        }

        // Allocate backing memory per the driver's requirements and bind it.
        let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
        let block = self
            .mem_allocator
            .lock()
            .allocate(&self.raw, requirements, mem_usage)?;
        block.bind_buffer(&self.raw, &mut buffer)?;

        Ok(resource::Buffer {
            raw: Some((buffer, block)),
            device_id: Stored {
                value: id::Valid(self_id),
                ref_count: self.life_guard.add_ref(),
            },
            usage: desc.usage,
            size: desc.size,
            // Tracks which byte ranges still need zero-initialization.
            initialization_status: MemoryInitTracker::new(desc.size),
            sync_mapped_writes: None,
            map_state: resource::BufferMapState::Idle,
            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
        })
    }
615 
    /// Creates a texture on this device, validating the descriptor against
    /// device features and format capabilities, then allocating and binding
    /// backing memory.
    ///
    /// # Errors
    /// Fails on missing format features, copy usage on `D24Plus` formats,
    /// empty or disallowed usage, invalid dimensions/sample count, invalid
    /// mip level count, or allocation failure.
    fn create_texture(
        &self,
        self_id: id::DeviceId,
        adapter: &crate::instance::Adapter<B>,
        desc: &resource::TextureDescriptor,
    ) -> Result<resource::Texture<B>, resource::CreateTextureError> {
        debug_assert_eq!(self_id.backend(), B::VARIANT);

        let format_desc = desc.format.describe();
        self.require_features(format_desc.required_features)
            .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?;

        // Ensure `D24Plus` textures cannot be copied
        match desc.format {
            TextureFormat::Depth24Plus | TextureFormat::Depth24PlusStencil8 => {
                if desc
                    .usage
                    .intersects(wgt::TextureUsage::COPY_SRC | wgt::TextureUsage::COPY_DST)
                {
                    return Err(resource::CreateTextureError::CannotCopyD24Plus);
                }
            }
            _ => {}
        }

        if desc.usage.is_empty() {
            return Err(resource::CreateTextureError::EmptyUsage);
        }

        // Use adapter-specific format features when the corresponding
        // feature is enabled; otherwise fall back to guaranteed ones.
        let format_features = if self
            .features
            .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES)
        {
            adapter.get_texture_format_features(desc.format)
        } else {
            format_desc.guaranteed_format_features
        };

        let missing_allowed_usages = desc.usage - format_features.allowed_usages;
        if !missing_allowed_usages.is_empty() {
            return Err(resource::CreateTextureError::InvalidUsages(
                missing_allowed_usages,
                desc.format,
            ));
        }

        // Validate size/dimension/sample-count and build the backend kind.
        let kind = conv::map_texture_dimension_size(
            desc.dimension,
            desc.size,
            desc.sample_count,
            &self.limits,
        )?;
        let format = conv::map_texture_format(desc.format, self.private_features);
        let aspects = format.surface_desc().aspects;
        let usage = conv::map_texture_usage(desc.usage, aspects);

        // Mip count must be nonzero and fit both the global cap and the
        // maximum chain length for this extent.
        let mip_level_count = desc.mip_level_count;
        if mip_level_count == 0
            || mip_level_count > MAX_MIP_LEVELS
            || mip_level_count > kind.compute_num_levels() as u32
        {
            return Err(resource::CreateTextureError::InvalidMipLevelCount(
                mip_level_count,
            ));
        }
        let mut view_caps = hal::image::ViewCapabilities::empty();
        // 2D textures with array layer counts that are multiples of 6 could be cubemaps
        // Following gpuweb/gpuweb#68 always add the hint in that case
        if desc.dimension == TextureDimension::D2
            && desc.size.depth_or_array_layers % 6 == 0
            && (desc.size.depth_or_array_layers == 6
                || self
                    .downlevel
                    .flags
                    .contains(wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES))
        {
            view_caps |= hal::image::ViewCapabilities::KIND_CUBE;
        };

        // TODO: 2D arrays, cubemap arrays

        let mut image = unsafe {
            let mut image = self
                .raw
                .create_image(
                    kind,
                    desc.mip_level_count as hal::image::Level,
                    format,
                    hal::image::Tiling::Optimal,
                    usage,
                    hal::memory::SparseFlags::empty(),
                    view_caps,
                )
                .map_err(|err| match err {
                    hal::image::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
                    _ => panic!("failed to create texture: {}", err),
                })?;
            if let Some(ref label) = desc.label {
                self.raw.set_image_name(&mut image, label);
            }
            image
        };

        // Allocate device-local backing memory and bind it to the image.
        let requirements = unsafe { self.raw.get_image_requirements(&image) };
        let block = self.mem_allocator.lock().allocate(
            &self.raw,
            requirements,
            gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
        )?;
        block.bind_image(&self.raw, &mut image)?;

        Ok(resource::Texture {
            raw: Some((image, block)),
            device_id: Stored {
                value: id::Valid(self_id),
                ref_count: self.life_guard.add_ref(),
            },
            usage: desc.usage,
            aspects,
            dimension: desc.dimension,
            kind,
            format: desc.format,
            format_features,
            framebuffer_attachment: hal::image::FramebufferAttachment {
                usage,
                view_caps,
                format,
            },
            // The selector covering every mip level and array layer.
            full_range: TextureSelector {
                levels: 0..desc.mip_level_count as hal::image::Level,
                layers: 0..kind.num_layers(),
            },
            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
        })
    }
751 
    /// Validate `desc` against `texture` and create a `TextureView` backed by a
    /// fresh raw `hal` image view.
    ///
    /// Validation proceeds in order: the texture must still own its raw image,
    /// the requested view dimension must be compatible with the texture's
    /// dimension (with cube/cube-array depth checks for 2D textures), the
    /// mip-level and array-layer ranges must fit inside the texture, the
    /// requested aspect must be a subset of the texture's aspects, and the
    /// selected layer count must match the view dimension.
    fn create_texture_view(
        &self,
        texture: &resource::Texture<B>,
        texture_id: id::TextureId,
        desc: &resource::TextureViewDescriptor,
    ) -> Result<resource::TextureView<B>, resource::CreateTextureViewError> {
        // `raw` is `None` once the texture has been destroyed.
        let &(ref texture_raw, _) = texture
            .raw
            .as_ref()
            .ok_or(resource::CreateTextureViewError::InvalidTexture)?;

        // Resolve the view dimension: either validate the one the caller asked
        // for, or infer a default from the texture's kind.
        let view_dim =
            match desc.dimension {
                Some(dim) => {
                    use hal::image::Kind;

                    let required_tex_dim = dim.compatible_texture_dimension();

                    if required_tex_dim != texture.dimension {
                        return Err(
                            resource::CreateTextureViewError::InvalidTextureViewDimension {
                                view: dim,
                                image: texture.dimension,
                            },
                        );
                    }

                    // Cube views need exactly 6 layers; cube-array views need a
                    // multiple of 6. Only 2D textures can back cube views.
                    if let Kind::D2(_, _, depth, _) = texture.kind {
                        match dim {
                            TextureViewDimension::Cube if depth != 6 => {
                                return Err(
                                    resource::CreateTextureViewError::InvalidCubemapTextureDepth {
                                        depth,
                                    },
                                )
                            }
                            TextureViewDimension::CubeArray if depth % 6 != 0 => return Err(
                                resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
                                    depth,
                                },
                            ),
                            _ => {}
                        }
                    }

                    dim
                }
                None => match texture.kind {
                    hal::image::Kind::D1(..) => wgt::TextureViewDimension::D1,
                    // A multi-layer 2D texture with no explicit layer count
                    // defaults to a D2Array view covering all layers.
                    hal::image::Kind::D2(_, _, depth, _)
                        if depth > 1 && desc.range.array_layer_count.is_none() =>
                    {
                        wgt::TextureViewDimension::D2Array
                    }
                    hal::image::Kind::D2(..) => wgt::TextureViewDimension::D2,
                    hal::image::Kind::D3(..) => wgt::TextureViewDimension::D3,
                },
            };

        // A missing count means "one level/layer starting at the base" for the
        // purpose of this bounds check.
        let required_level_count =
            desc.range.base_mip_level + desc.range.mip_level_count.map_or(1, |count| count.get());
        let required_layer_count = desc.range.base_array_layer
            + desc.range.array_layer_count.map_or(1, |count| count.get());
        let level_end = texture.full_range.levels.end;
        let layer_end = texture.full_range.layers.end;
        if required_level_count > level_end as u32 {
            return Err(resource::CreateTextureViewError::TooManyMipLevels {
                requested: required_level_count,
                total: level_end,
            });
        }
        if required_layer_count > layer_end as u32 {
            return Err(resource::CreateTextureViewError::TooManyArrayLayers {
                requested: required_layer_count,
                total: layer_end,
            });
        };

        // Resolve the aspect selection and make sure the texture actually has
        // the requested aspects.
        let aspects = match desc.range.aspect {
            wgt::TextureAspect::All => texture.aspects,
            wgt::TextureAspect::DepthOnly => hal::format::Aspects::DEPTH,
            wgt::TextureAspect::StencilOnly => hal::format::Aspects::STENCIL,
        };
        if !texture.aspects.contains(aspects) {
            return Err(resource::CreateTextureViewError::InvalidAspect {
                requested: aspects,
                total: texture.aspects,
            });
        }

        // Build the tracking selector: an omitted count extends the range to
        // the end of the texture, otherwise it ends at base + count.
        let end_level = desc
            .range
            .mip_level_count
            .map_or(level_end, |_| required_level_count as u8);
        let end_layer = desc
            .range
            .array_layer_count
            .map_or(layer_end, |_| required_layer_count as u16);
        let selector = TextureSelector {
            levels: desc.range.base_mip_level as u8..end_level,
            layers: desc.range.base_array_layer as u16..end_layer,
        };

        // The resolved layer count must agree with the view dimension:
        // exactly 1 for non-array views, 6 for cubes, a multiple of 6 for
        // cube arrays, anything for D2Array.
        let view_layer_count = (selector.layers.end - selector.layers.start) as u32;
        let layer_check_ok = match view_dim {
            wgt::TextureViewDimension::D1
            | wgt::TextureViewDimension::D2
            | wgt::TextureViewDimension::D3 => view_layer_count == 1,
            wgt::TextureViewDimension::D2Array => true,
            wgt::TextureViewDimension::Cube => view_layer_count == 6,
            wgt::TextureViewDimension::CubeArray => view_layer_count % 6 == 0,
        };
        if !layer_check_ok {
            return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
                requested: view_layer_count,
                dim: view_dim,
            });
        }

        // The view format defaults to the texture's own format.
        let format = desc.format.unwrap_or(texture.format);
        let range = hal::image::SubresourceRange {
            aspects,
            level_start: desc.range.base_mip_level as _,
            level_count: desc.range.mip_level_count.map(|v| v.get() as _),
            layer_start: desc.range.base_array_layer as _,
            layer_count: desc.range.array_layer_count.map(|v| v.get() as _),
        };
        // Extent of the base mip level the view starts at.
        let hal_extent = texture
            .kind
            .extent()
            .at_level(desc.range.base_mip_level as _);

        let raw = unsafe {
            self.raw
                .create_image_view(
                    texture_raw,
                    conv::map_texture_view_dimension(view_dim),
                    conv::map_texture_format(format, self.private_features),
                    hal::format::Swizzle::NO,
                    // conservatively assume the same usage
                    conv::map_texture_usage(texture.usage, aspects),
                    range,
                )
                .or(Err(resource::CreateTextureViewError::OutOfMemory))?
        };

        Ok(resource::TextureView {
            inner: resource::TextureViewInner::Native {
                raw,
                source_id: Stored {
                    value: id::Valid(texture_id),
                    ref_count: texture.life_guard.add_ref(),
                },
            },
            aspects,
            format: texture.format,
            format_features: texture.format_features,
            dimension: view_dim,
            extent: wgt::Extent3d {
                width: hal_extent.width,
                height: hal_extent.height,
                depth_or_array_layers: view_layer_count,
            },
            samples: texture.kind.num_samples(),
            framebuffer_attachment: texture.framebuffer_attachment.clone(),
            // once a storage - forever a storage
            sampled_internal_use: if texture.usage.contains(wgt::TextureUsage::STORAGE) {
                resource::TextureUse::SAMPLED | resource::TextureUse::STORAGE_LOAD
            } else {
                resource::TextureUse::SAMPLED
            },
            selector,
            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
        })
    }
927 
create_sampler( &self, self_id: id::DeviceId, desc: &resource::SamplerDescriptor, ) -> Result<resource::Sampler<B>, resource::CreateSamplerError>928     fn create_sampler(
929         &self,
930         self_id: id::DeviceId,
931         desc: &resource::SamplerDescriptor,
932     ) -> Result<resource::Sampler<B>, resource::CreateSamplerError> {
933         if desc
934             .address_modes
935             .iter()
936             .any(|am| am == &wgt::AddressMode::ClampToBorder)
937         {
938             self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
939         }
940 
941         let actual_clamp = if let Some(clamp) = desc.anisotropy_clamp {
942             let clamp = clamp.get();
943             let valid_clamp = clamp <= MAX_ANISOTROPY && conv::is_power_of_two(clamp as u32);
944             if !valid_clamp {
945                 return Err(resource::CreateSamplerError::InvalidClamp(clamp));
946             }
947             if self.private_features.anisotropic_filtering {
948                 Some(clamp)
949             } else {
950                 None
951             }
952         } else {
953             None
954         };
955 
956         let border = match desc.border_color {
957             None | Some(wgt::SamplerBorderColor::TransparentBlack) => {
958                 hal::image::BorderColor::TransparentBlack
959             }
960             Some(wgt::SamplerBorderColor::OpaqueBlack) => hal::image::BorderColor::OpaqueBlack,
961             Some(wgt::SamplerBorderColor::OpaqueWhite) => hal::image::BorderColor::OpaqueWhite,
962         };
963 
964         let filtering = [desc.min_filter, desc.mag_filter, desc.mipmap_filter]
965             .contains(&wgt::FilterMode::Linear);
966 
967         let info = hal::image::SamplerDesc {
968             min_filter: conv::map_filter(desc.min_filter),
969             mag_filter: conv::map_filter(desc.mag_filter),
970             mip_filter: conv::map_filter(desc.mipmap_filter),
971             reduction_mode: hal::image::ReductionMode::WeightedAverage,
972             wrap_mode: (
973                 conv::map_wrap(desc.address_modes[0]),
974                 conv::map_wrap(desc.address_modes[1]),
975                 conv::map_wrap(desc.address_modes[2]),
976             ),
977             lod_bias: hal::image::Lod(0.0),
978             lod_range: hal::image::Lod(desc.lod_min_clamp)..hal::image::Lod(desc.lod_max_clamp),
979             comparison: desc.compare.map(conv::map_compare_function),
980             border,
981             normalized: true,
982             anisotropy_clamp: actual_clamp,
983         };
984 
985         let raw = unsafe {
986             self.raw.create_sampler(&info).map_err(|err| match err {
987                 hal::device::AllocationError::OutOfMemory(_) => {
988                     resource::CreateSamplerError::Device(DeviceError::OutOfMemory)
989                 }
990                 hal::device::AllocationError::TooManyObjects => {
991                     resource::CreateSamplerError::TooManyObjects
992                 }
993             })?
994         };
995         Ok(resource::Sampler {
996             raw,
997             device_id: Stored {
998                 value: id::Valid(self_id),
999                 ref_count: self.life_guard.add_ref(),
1000             },
1001             life_guard: LifeGuard::new(desc.label.borrow_or_default()),
1002             comparison: info.comparison.is_some(),
1003             filtering,
1004         })
1005     }
1006 
    /// Create a shader module from SPIR-V, WGSL, or an already-built Naga IR
    /// module.
    ///
    /// Strategy: first try to obtain a Naga module from `source`; if one is
    /// available (and validation is requested or there is no SPIR-V to fall
    /// back on), validate it and try handing it to gfx-hal directly. If that
    /// path is unavailable or fails, fall back to SPIR-V — either the caller's
    /// original words or a fresh translation of the Naga module.
    fn create_shader_module<'a>(
        &self,
        self_id: id::DeviceId,
        desc: &pipeline::ShaderModuleDescriptor<'a>,
        source: pipeline::ShaderModuleSource<'a>,
    ) -> Result<pipeline::ShaderModule<B>, pipeline::CreateShaderModuleError> {
        // First, try to produce a Naga module.
        // `spv` keeps the original SPIR-V words (if any) for the fallback path.
        let (spv, module) = match source {
            pipeline::ShaderModuleSource::SpirV(spv) => {
                profiling::scope!("naga::spv::parse");
                // Parse the given shader code and store its representation.
                let options = naga::front::spv::Options {
                    adjust_coordinate_space: false, // we require NDC_Y_UP feature
                    strict_capabilities: true,
                    flow_graph_dump_prefix: None,
                };
                let parser = naga::front::spv::Parser::new(spv.iter().cloned(), &options);
                let module = match parser.parse() {
                    Ok(module) => Some(module),
                    Err(err) => {
                        log::warn!(
                            "Failed to parse shader SPIR-V code for {:?}: {:?}",
                            desc.label,
                            err
                        );
                        // A parse failure is fatal only when validation was
                        // requested; otherwise the raw SPIR-V is used as-is.
                        if desc.flags.contains(wgt::ShaderFlags::VALIDATION) {
                            return Err(pipeline::CreateShaderModuleError::Parsing);
                        }
                        log::warn!("\tProceeding unsafely without validation");
                        None
                    }
                };
                (Some(spv), module)
            }
            pipeline::ShaderModuleSource::Wgsl(code) => {
                profiling::scope!("naga::wgsl::parse_str");
                // TODO: refactor the corresponding Naga error to be owned, and then
                // display it instead of unwrapping
                match naga::front::wgsl::parse_str(&code) {
                    Ok(module) => (None, Some(module)),
                    Err(err) => {
                        log::error!("Failed to parse WGSL code for {:?}: {}", desc.label, err);
                        return Err(pipeline::CreateShaderModuleError::Parsing);
                    }
                }
            }
            pipeline::ShaderModuleSource::Naga(module) => (None, Some(module)),
        };

        // `naga_result` is Ok(raw module) on direct success, or
        // Err(Some(shader)) when the validated shader should be retried via
        // SPIR-V translation, or Err(None) when there is nothing to translate.
        let (naga_result, interface) = match module {
            // If succeeded, then validate it and attempt to give it to gfx-hal directly.
            Some(module) if desc.flags.contains(wgt::ShaderFlags::VALIDATION) || spv.is_none() => {
                use naga::valid::Capabilities as Caps;
                profiling::scope!("naga::validate");

                // Only advertise capabilities backed by enabled device features.
                let mut caps = Caps::empty();
                caps.set(
                    Caps::PUSH_CONSTANT,
                    self.features.contains(wgt::Features::PUSH_CONSTANTS),
                );
                caps.set(
                    Caps::FLOAT64,
                    self.features.contains(wgt::Features::SHADER_FLOAT64),
                );
                let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps)
                    .validate(&module)?;
                let interface = validation::Interface::new(&module, &info);
                let shader = hal::device::NagaShader { module, info };

                let naga_result = if desc
                    .flags
                    .contains(wgt::ShaderFlags::EXPERIMENTAL_TRANSLATION)
                    || !cfg!(feature = "cross")
                {
                    match unsafe { self.raw.create_shader_module_from_naga(shader) } {
                        Ok(raw) => Ok(raw),
                        // The backend hands the shader back on failure so the
                        // SPIR-V fallback below can still translate it.
                        Err((hal::device::ShaderError::CompilationFailed(msg), shader)) => {
                            log::warn!("Shader module compilation failed: {}", msg);
                            Err(Some(shader))
                        }
                        Err((_, shader)) => Err(Some(shader)),
                    }
                } else {
                    Err(Some(shader))
                };
                (naga_result, Some(interface))
            }
            _ => (Err(None), None),
        };

        // Otherwise, fall back to SPIR-V.
        let spv_result = match naga_result {
            Ok(raw) => Ok(raw),
            Err(maybe_shader) => {
                let spv = match spv {
                    Some(data) => Ok(data),
                    None => {
                        // Produce a SPIR-V from the Naga module
                        profiling::scope!("naga::wpv::write_vec");
                        // Safe to unwrap: `spv.is_none()` implies the Naga
                        // branch above ran and produced Err(Some(shader)).
                        let shader = maybe_shader.unwrap();
                        naga::back::spv::write_vec(&shader.module, &shader.info, &self.spv_options)
                            .map(Cow::Owned)
                    }
                };
                match spv {
                    Ok(data) => unsafe { self.raw.create_shader_module(&data) },
                    Err(e) => Err(hal::device::ShaderError::CompilationFailed(format!(
                        "{}",
                        e
                    ))),
                }
            }
        };

        Ok(pipeline::ShaderModule {
            raw: match spv_result {
                Ok(raw) => raw,
                Err(hal::device::ShaderError::OutOfMemory(_)) => {
                    return Err(DeviceError::OutOfMemory.into());
                }
                Err(error) => {
                    log::error!("Shader error: {}", error);
                    return Err(pipeline::CreateShaderModuleError::Generation);
                }
            },
            device_id: Stored {
                value: id::Valid(self_id),
                ref_count: self.life_guard.add_ref(),
            },
            interface,
            #[cfg(debug_assertions)]
            label: desc.label.to_string_or_default(),
        })
    }
1141 
1142     /// Create a compatible render pass with a given key.
1143     ///
1144     /// This functions doesn't consider the following aspects for compatibility:
1145     ///  - image layouts
1146     ///  - resolve attachments
create_compatible_render_pass( &self, key: &RenderPassKey, ) -> Result<B::RenderPass, hal::device::OutOfMemory>1147     fn create_compatible_render_pass(
1148         &self,
1149         key: &RenderPassKey,
1150     ) -> Result<B::RenderPass, hal::device::OutOfMemory> {
1151         let mut color_ids = [(0, hal::image::Layout::ColorAttachmentOptimal); MAX_COLOR_TARGETS];
1152         for (index, color) in color_ids[..key.colors.len()].iter_mut().enumerate() {
1153             color.0 = index;
1154         }
1155         let depth_id = key.depth_stencil.as_ref().map(|_| {
1156             (
1157                 key.colors.len(),
1158                 hal::image::Layout::DepthStencilAttachmentOptimal,
1159             )
1160         });
1161 
1162         let subpass = hal::pass::SubpassDesc {
1163             colors: &color_ids[..key.colors.len()],
1164             depth_stencil: depth_id.as_ref(),
1165             inputs: &[],
1166             resolves: &[],
1167             preserves: &[],
1168         };
1169         let all = key.all().map(|&(ref at, _)| at.clone());
1170 
1171         unsafe {
1172             self.raw
1173                 .create_render_pass(all, iter::once(subpass), iter::empty())
1174         }
1175     }
1176 
deduplicate_bind_group_layout( self_id: id::DeviceId, entry_map: &binding_model::BindEntryMap, guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>, ) -> Option<id::BindGroupLayoutId>1177     fn deduplicate_bind_group_layout(
1178         self_id: id::DeviceId,
1179         entry_map: &binding_model::BindEntryMap,
1180         guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
1181     ) -> Option<id::BindGroupLayoutId> {
1182         guard
1183             .iter(self_id.backend())
1184             .find(|&(_, ref bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
1185             .map(|(id, value)| {
1186                 value.multi_ref_count.inc();
1187                 id
1188             })
1189     }
1190 
get_introspection_bind_group_layouts<'a>( pipeline_layout: &binding_model::PipelineLayout<B>, bgl_guard: &'a Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>, ) -> ArrayVec<[&'a binding_model::BindEntryMap; MAX_BIND_GROUPS]>1191     fn get_introspection_bind_group_layouts<'a>(
1192         pipeline_layout: &binding_model::PipelineLayout<B>,
1193         bgl_guard: &'a Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
1194     ) -> ArrayVec<[&'a binding_model::BindEntryMap; MAX_BIND_GROUPS]> {
1195         pipeline_layout
1196             .bind_group_layout_ids
1197             .iter()
1198             .map(|&id| &bgl_guard[id].entries)
1199             .collect()
1200     }
1201 
    /// Create a bind group layout from a validated entry map.
    ///
    /// For each entry this tallies descriptor-pool counts, enforces the
    /// features required by the binding type (arrays, read-write storage
    /// textures, vertex-writable storage), then builds the raw descriptor set
    /// layout and validates per-layout binding limits.
    fn create_bind_group_layout(
        &self,
        self_id: id::DeviceId,
        label: Option<&str>,
        entry_map: binding_model::BindEntryMap,
    ) -> Result<binding_model::BindGroupLayout<B>, binding_model::CreateBindGroupLayoutError> {
        let mut desc_count = descriptor::DescriptorTotalCount::default();
        for entry in entry_map.values() {
            use wgt::BindingType as Bt;

            let mut required_features = wgt::Features::empty();
            // Per binding type: which pool counter it increments, which
            // feature (if any) allows `count > 1` arrays of it, and whether it
            // is writable storage (which gates vertex-stage visibility).
            let (counter, array_feature, is_writable_storage) = match entry.ty {
                Bt::Buffer {
                    ty: wgt::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: _,
                } => (
                    &mut desc_count.uniform_buffer,
                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
                    false,
                ),
                Bt::Buffer {
                    ty: wgt::BufferBindingType::Uniform,
                    has_dynamic_offset: true,
                    min_binding_size: _,
                } => (
                    &mut desc_count.uniform_buffer_dynamic,
                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
                    false,
                ),
                Bt::Buffer {
                    ty: wgt::BufferBindingType::Storage { read_only },
                    has_dynamic_offset,
                    min_binding_size: _,
                } => (
                    if has_dynamic_offset {
                        &mut desc_count.storage_buffer_dynamic
                    } else {
                        &mut desc_count.storage_buffer
                    },
                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
                    !read_only,
                ),
                Bt::Sampler { .. } => (&mut desc_count.sampler, None, false),
                Bt::Texture { .. } => (
                    &mut desc_count.sampled_image,
                    Some(wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY),
                    false,
                ),
                Bt::StorageTexture { access, .. } => (
                    &mut desc_count.storage_image,
                    None,
                    match access {
                        wgt::StorageTextureAccess::ReadOnly => false,
                        wgt::StorageTextureAccess::WriteOnly => true,
                        // Read-write storage textures additionally require the
                        // adapter-specific format features.
                        wgt::StorageTextureAccess::ReadWrite => {
                            required_features |=
                                wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
                            true
                        }
                    },
                ),
            };

            *counter += match entry.count {
                // Validate the count parameter
                Some(count) => {
                    // Binding arrays are only legal for types that have an
                    // enabling feature.
                    required_features |= array_feature
                        .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported)
                        .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
                            binding: entry.binding,
                            error,
                        })?;
                    count.get()
                }
                None => 1,
            };
            if is_writable_storage && entry.visibility.contains(wgt::ShaderStage::VERTEX) {
                required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE;
            }

            // Reject the entry if any accumulated feature is not enabled on
            // this device.
            self.require_features(required_features)
                .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures)
                .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
                    binding: entry.binding,
                    error,
                })?;
        }

        // Translate the validated entries into gfx-hal descriptor bindings.
        let raw_bindings = entry_map
            .values()
            .map(|entry| hal::pso::DescriptorSetLayoutBinding {
                binding: entry.binding,
                ty: conv::map_binding_type(entry),
                count: entry
                    .count
                    .map_or(1, |v| v.get() as hal::pso::DescriptorArrayIndex), //TODO: consolidate
                stage_flags: conv::map_shader_stage_flags(entry.visibility),
                immutable_samplers: false, // TODO
            });
        let raw = unsafe {
            let mut raw_layout = self
                .raw
                .create_descriptor_set_layout(raw_bindings, iter::empty())
                .or(Err(DeviceError::OutOfMemory))?;
            if let Some(label) = label {
                self.raw
                    .set_descriptor_set_layout_name(&mut raw_layout, label);
            }
            raw_layout
        };

        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
        for entry in entry_map.values() {
            count_validator.add_binding(entry);
        }
        // If a single bind group layout violates limits, the pipeline layout is definitely
        // going to violate limits too, lets catch it now.
        count_validator
            .validate(&self.limits)
            .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;

        Ok(binding_model::BindGroupLayout {
            raw,
            device_id: Stored {
                value: id::Valid(self_id),
                ref_count: self.life_guard.add_ref(),
            },
            multi_ref_count: MultiRefCount::new(),
            desc_count,
            dynamic_count: entry_map
                .values()
                .filter(|b| b.ty.has_dynamic_offset())
                .count(),
            count_validator,
            entries: entry_map,
            #[cfg(debug_assertions)]
            label: label.unwrap_or("").to_string(),
        })
    }
1342 
1343     #[allow(clippy::too_many_arguments)]
create_buffer_descriptor<'a>( bb: &binding_model::BufferBinding, binding: u32, decl: &wgt::BindGroupLayoutEntry, used_buffer_ranges: &mut Vec<MemoryInitTrackerAction<id::BufferId>>, dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>, used: &mut TrackerSet, storage: &'a Storage<resource::Buffer<B>, id::BufferId>, limits: &wgt::Limits, ) -> Result<hal::pso::Descriptor<'a, B>, binding_model::CreateBindGroupError>1344     fn create_buffer_descriptor<'a>(
1345         bb: &binding_model::BufferBinding,
1346         binding: u32,
1347         decl: &wgt::BindGroupLayoutEntry,
1348         used_buffer_ranges: &mut Vec<MemoryInitTrackerAction<id::BufferId>>,
1349         dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
1350         used: &mut TrackerSet,
1351         storage: &'a Storage<resource::Buffer<B>, id::BufferId>,
1352         limits: &wgt::Limits,
1353     ) -> Result<hal::pso::Descriptor<'a, B>, binding_model::CreateBindGroupError> {
1354         use crate::binding_model::CreateBindGroupError as Error;
1355 
1356         let (binding_ty, dynamic, min_size) = match decl.ty {
1357             wgt::BindingType::Buffer {
1358                 ty,
1359                 has_dynamic_offset,
1360                 min_binding_size,
1361             } => (ty, has_dynamic_offset, min_binding_size),
1362             _ => {
1363                 return Err(Error::WrongBindingType {
1364                     binding,
1365                     actual: decl.ty,
1366                     expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
1367                 })
1368             }
1369         };
1370         let (pub_usage, internal_use, range_limit) = match binding_ty {
1371             wgt::BufferBindingType::Uniform => (
1372                 wgt::BufferUsage::UNIFORM,
1373                 resource::BufferUse::UNIFORM,
1374                 limits.max_uniform_buffer_binding_size,
1375             ),
1376             wgt::BufferBindingType::Storage { read_only } => (
1377                 wgt::BufferUsage::STORAGE,
1378                 if read_only {
1379                     resource::BufferUse::STORAGE_LOAD
1380                 } else {
1381                     resource::BufferUse::STORAGE_STORE
1382                 },
1383                 limits.max_storage_buffer_binding_size,
1384             ),
1385         };
1386 
1387         if bb.offset % wgt::BIND_BUFFER_ALIGNMENT != 0 {
1388             return Err(Error::UnalignedBufferOffset(bb.offset));
1389         }
1390 
1391         let buffer = used
1392             .buffers
1393             .use_extend(storage, bb.buffer_id, (), internal_use)
1394             .map_err(|_| Error::InvalidBuffer(bb.buffer_id))?;
1395         check_buffer_usage(buffer.usage, pub_usage)?;
1396         let &(ref buffer_raw, _) = buffer
1397             .raw
1398             .as_ref()
1399             .ok_or(Error::InvalidBuffer(bb.buffer_id))?;
1400 
1401         let (bind_size, bind_end) = match bb.size {
1402             Some(size) => {
1403                 let end = bb.offset + size.get();
1404                 if end > buffer.size {
1405                     return Err(Error::BindingRangeTooLarge {
1406                         buffer: bb.buffer_id,
1407                         range: bb.offset..end,
1408                         size: buffer.size,
1409                     });
1410                 }
1411                 (size.get(), end)
1412             }
1413             None => (buffer.size - bb.offset, buffer.size),
1414         };
1415 
1416         if bind_size > range_limit as u64 {
1417             return Err(Error::BufferRangeTooLarge {
1418                 binding,
1419                 given: bind_size as u32,
1420                 limit: range_limit,
1421             });
1422         }
1423 
1424         // Record binding info for validating dynamic offsets
1425         if dynamic {
1426             dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
1427                 maximum_dynamic_offset: buffer.size - bind_end,
1428             });
1429         }
1430 
1431         if let Some(non_zero) = min_size {
1432             let min_size = non_zero.get();
1433             if min_size > bind_size {
1434                 return Err(Error::BindingSizeTooSmall {
1435                     buffer: bb.buffer_id,
1436                     actual: bind_size,
1437                     min: min_size,
1438                 });
1439             }
1440         } else if bind_size == 0 {
1441             return Err(Error::BindingZeroSize(bb.buffer_id));
1442         }
1443 
1444         used_buffer_ranges.push(MemoryInitTrackerAction {
1445             id: bb.buffer_id,
1446             range: bb.offset..(bb.offset + bind_size),
1447             kind: MemoryInitKind::NeedsInitializedMemory,
1448         });
1449 
1450         let sub_range = hal::buffer::SubRange {
1451             offset: bb.offset,
1452             size: Some(bind_size),
1453         };
1454         Ok(hal::pso::Descriptor::Buffer(buffer_raw, sub_range))
1455     }
1456 
    /// Validate a bind group descriptor against its layout and allocate the
    /// backing descriptor set.
    ///
    /// Each descriptor entry is matched to the layout declaration with the
    /// same binding number and validated per resource kind (buffer, buffer
    /// array, sampler, texture view, texture view array). Every referenced
    /// resource is recorded in the returned group's `TrackerSet` (`used`)
    /// so command submission can transition it; buffer bindings also queue
    /// memory-initialization actions. On success the coalesced descriptor
    /// writes are flushed into a freshly allocated descriptor set.
    fn create_bind_group<G: GlobalIdentityHandlerFactory>(
        &self,
        self_id: id::DeviceId,
        layout: &binding_model::BindGroupLayout<B>,
        desc: &binding_model::BindGroupDescriptor,
        hub: &Hub<B, G>,
        token: &mut Token<binding_model::BindGroupLayout<B>>,
    ) -> Result<binding_model::BindGroup<B>, binding_model::CreateBindGroupError> {
        use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
        {
            // Check that the number of entries in the descriptor matches
            // the number of entries in the layout.
            let actual = desc.entries.len();
            let expected = layout.entries.len();
            if actual != expected {
                return Err(Error::BindingsNumMismatch { expected, actual });
            }
        }

        // TODO: arrayvec/smallvec
        // Record binding info for dynamic offset validation
        let mut dynamic_binding_info = Vec::new();
        // fill out the descriptors
        let mut used = TrackerSet::new(B::VARIANT);

        // Acquire read locks on every resource registry an entry may
        // reference; the token chain enforces the global lock order.
        let (buffer_guard, mut token) = hub.buffers.read(token);
        let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
        let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
        let (sampler_guard, _) = hub.samplers.read(&mut token);

        // `BTreeMap` has ordered bindings as keys, which allows us to coalesce
        // the descriptor writes into a single transaction.
        let mut write_map = BTreeMap::new();
        let mut used_buffer_ranges = Vec::new();
        for entry in desc.entries.iter() {
            let binding = entry.binding;
            // Find the corresponding declaration in the layout
            let decl = layout
                .entries
                .get(&binding)
                .ok_or(Error::MissingBindingDeclaration(binding))?;
            // Most bindings produce a single descriptor; arrays produce one
            // per element, hence the SmallVec.
            let descriptors: SmallVec<[_; 1]> = match entry.resource {
                Br::Buffer(ref bb) => {
                    let buffer_desc = Self::create_buffer_descriptor(
                        &bb,
                        binding,
                        &decl,
                        &mut used_buffer_ranges,
                        &mut dynamic_binding_info,
                        &mut used,
                        &*buffer_guard,
                        &self.limits,
                    )?;
                    SmallVec::from([buffer_desc])
                }
                Br::BufferArray(ref bindings_array) => {
                    // A binding array requires a `count` in the layout, and
                    // the provided array length must match it exactly.
                    if let Some(count) = decl.count {
                        let count = count.get() as usize;
                        let num_bindings = bindings_array.len();
                        if count != num_bindings {
                            return Err(Error::BindingArrayLengthMismatch {
                                actual: num_bindings,
                                expected: count,
                            });
                        }
                    } else {
                        return Err(Error::SingleBindingExpected);
                    }

                    bindings_array
                        .iter()
                        .map(|bb| {
                            Self::create_buffer_descriptor(
                                &bb,
                                binding,
                                &decl,
                                &mut used_buffer_ranges,
                                &mut dynamic_binding_info,
                                &mut used,
                                &*buffer_guard,
                                &self.limits,
                            )
                        })
                        .collect::<Result<_, _>>()?
                }
                Br::Sampler(id) => {
                    match decl.ty {
                        wgt::BindingType::Sampler {
                            filtering,
                            comparison,
                        } => {
                            let sampler = used
                                .samplers
                                .use_extend(&*sampler_guard, id, (), ())
                                .map_err(|_| Error::InvalidSampler(id))?;

                            // Check the actual sampler to also (not) be a comparison sampler
                            if sampler.comparison != comparison {
                                return Err(Error::WrongSamplerComparison {
                                    binding,
                                    layout_cmp: comparison,
                                    sampler_cmp: sampler.comparison,
                                });
                            }
                            // Check the actual sampler to be non-filtering, if required
                            if sampler.filtering && !filtering {
                                return Err(Error::WrongSamplerFiltering {
                                    binding,
                                    layout_flt: filtering,
                                    sampler_flt: sampler.filtering,
                                });
                            }

                            SmallVec::from([hal::pso::Descriptor::Sampler(&sampler.raw)])
                        }
                        _ => {
                            return Err(Error::WrongBindingType {
                                binding,
                                actual: decl.ty,
                                expected: "Sampler",
                            })
                        }
                    }
                }
                Br::TextureView(id) => {
                    let view = used
                        .views
                        .use_extend(&*texture_view_guard, id, (), ())
                        .map_err(|_| Error::InvalidTextureView(id))?;
                    let format_info = view.format.describe();
                    // Determine the required texture usage and internal use
                    // from the declaration, validating the view against it.
                    let (pub_usage, internal_use) = match decl.ty {
                        wgt::BindingType::Texture {
                            sample_type,
                            view_dimension,
                            multisampled,
                        } => {
                            use wgt::TextureSampleType as Tst;
                            if multisampled != (view.samples != 1) {
                                return Err(Error::InvalidTextureMultisample {
                                    binding,
                                    layout_multisampled: multisampled,
                                    view_samples: view.samples as u32,
                                });
                            }
                            match (sample_type, format_info.sample_type, view.format_features.filterable ) {
                                (Tst::Uint, Tst::Uint, ..) |
                                (Tst::Sint, Tst::Sint, ..) |
                                (Tst::Depth, Tst::Depth, ..) |
                                // if we expect non-filterable, accept anything float
                                (Tst::Float { filterable: false }, Tst::Float { .. }, ..) |
                                // if we expect filterable, require it
                                (Tst::Float { filterable: true }, Tst::Float { filterable: true }, ..) |
                                // if we expect filterable, also accept Float that is defined as unfilterable if filterable feature is explicitly enabled
                                // (only hit if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is enabled)
                                (Tst::Float { filterable: true }, Tst::Float { .. }, true) |
                                // if we expect float, also accept depth
                                (Tst::Float { .. }, Tst::Depth, ..) => {}
                                _ => {
                                    return Err(Error::InvalidTextureSampleType {
                                    binding,
                                    layout_sample_type: sample_type,
                                    view_format: view.format,
                                })
                            },
                            }
                            if view_dimension != view.dimension {
                                return Err(Error::InvalidTextureDimension {
                                    binding,
                                    layout_dimension: view_dimension,
                                    view_dimension: view.dimension,
                                });
                            }
                            (wgt::TextureUsage::SAMPLED, view.sampled_internal_use)
                        }
                        wgt::BindingType::StorageTexture {
                            access,
                            format,
                            view_dimension,
                        } => {
                            // Storage textures require an exact format match.
                            if format != view.format {
                                return Err(Error::InvalidStorageTextureFormat {
                                    binding,
                                    layout_format: format,
                                    view_format: view.format,
                                });
                            }
                            if view_dimension != view.dimension {
                                return Err(Error::InvalidTextureDimension {
                                    binding,
                                    layout_dimension: view_dimension,
                                    view_dimension: view.dimension,
                                });
                            }
                            let internal_use = match access {
                                wgt::StorageTextureAccess::ReadOnly => {
                                    resource::TextureUse::STORAGE_LOAD
                                }
                                wgt::StorageTextureAccess::WriteOnly => {
                                    resource::TextureUse::STORAGE_STORE
                                }
                                wgt::StorageTextureAccess::ReadWrite => {
                                    // Read-write storage access is gated on a
                                    // per-format feature flag.
                                    if !view.format_features.flags.contains(
                                        wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE,
                                    ) {
                                        return Err(Error::StorageReadWriteNotSupported(
                                            view.format,
                                        ));
                                    }

                                    resource::TextureUse::STORAGE_STORE
                                        | resource::TextureUse::STORAGE_LOAD
                                }
                            };
                            (wgt::TextureUsage::STORAGE, internal_use)
                        }
                        _ => return Err(Error::WrongBindingType {
                            binding,
                            actual: decl.ty,
                            expected:
                                "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
                        }),
                    };
                    // A view covering both depth and stencil aspects cannot
                    // be bound as a single descriptor.
                    if view
                        .aspects
                        .contains(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL)
                    {
                        return Err(Error::DepthStencilAspect);
                    }
                    match view.inner {
                        resource::TextureViewInner::Native {
                            ref raw,
                            ref source_id,
                        } => {
                            // Careful here: the texture may no longer have its own ref count,
                            // if it was deleted by the user.
                            let texture = &texture_guard[source_id.value];
                            used.textures
                                .change_extend(
                                    source_id.value,
                                    &source_id.ref_count,
                                    view.selector.clone(),
                                    internal_use,
                                )
                                .map_err(UsageConflict::from)?;
                            check_texture_usage(texture.usage, pub_usage)?;
                            let image_layout =
                                conv::map_texture_state(internal_use, view.aspects).1;
                            SmallVec::from([hal::pso::Descriptor::Image(raw, image_layout)])
                        }
                        resource::TextureViewInner::SwapChain { .. } => {
                            return Err(Error::SwapChainImage);
                        }
                    }
                }
                Br::TextureViewArray(ref bindings_array) => {
                    // Same length rule as buffer arrays: `count` must be
                    // declared and match the provided array exactly.
                    if let Some(count) = decl.count {
                        let count = count.get() as usize;
                        let num_bindings = bindings_array.len();
                        if count != num_bindings {
                            return Err(Error::BindingArrayLengthMismatch {
                                actual: num_bindings,
                                expected: count,
                            });
                        }
                    } else {
                        return Err(Error::SingleBindingExpected);
                    }

                    bindings_array
                        .iter()
                        .map(|&id| {
                            let view = used
                                .views
                                .use_extend(&*texture_view_guard, id, (), ())
                                .map_err(|_| Error::InvalidTextureView(id))?;
                            let (pub_usage, internal_use) = match decl.ty {
                                wgt::BindingType::Texture { .. } => {
                                    (wgt::TextureUsage::SAMPLED, view.sampled_internal_use)
                                }
                                _ => {
                                    return Err(Error::WrongBindingType {
                                        binding,
                                        actual: decl.ty,
                                        expected: "SampledTextureArray",
                                    })
                                }
                            };
                            match view.inner {
                                resource::TextureViewInner::Native {
                                    ref raw,
                                    ref source_id,
                                } => {
                                    // Careful here: the texture may no longer have its own ref count,
                                    // if it was deleted by the user.
                                    let texture = &texture_guard[source_id.value];
                                    used.textures
                                        .change_extend(
                                            source_id.value,
                                            &source_id.ref_count,
                                            view.selector.clone(),
                                            internal_use,
                                        )
                                        .map_err(UsageConflict::from)?;
                                    check_texture_usage(texture.usage, pub_usage)?;
                                    let image_layout =
                                        conv::map_texture_state(internal_use, view.aspects).1;
                                    Ok(hal::pso::Descriptor::Image(raw, image_layout))
                                }
                                resource::TextureViewInner::SwapChain { .. } => {
                                    Err(Error::SwapChainImage)
                                }
                            }
                        })
                        .collect::<Result<_, _>>()?
                }
            };
            // Each binding number may appear at most once in the descriptor.
            if write_map.insert(binding, descriptors).is_some() {
                return Err(Error::DuplicateBinding(binding));
            }
        }

        // Allocate exactly one descriptor set for this group.
        let mut desc_sets =
            self.desc_allocator
                .lock()
                .allocate(&self.raw, &layout.raw, &layout.desc_count, 1)?;
        let mut desc_set = desc_sets.pop().unwrap();

        // Set the descriptor set's label for easier debugging.
        if let Some(label) = desc.label.as_ref() {
            unsafe {
                self.raw.set_descriptor_set_name(desc_set.raw_mut(), &label);
            }
        }

        // Flush all descriptors in one write, starting at the lowest binding
        // (the BTreeMap iteration order makes the bindings contiguous).
        if let Some(start_binding) = write_map.keys().next().cloned() {
            let descriptors = write_map.into_iter().flat_map(|(_, list)| list);
            unsafe {
                let write = hal::pso::DescriptorSetWrite {
                    set: desc_set.raw_mut(),
                    binding: start_binding,
                    array_offset: 0,
                    descriptors,
                };
                self.raw.write_descriptor_set(write);
            }
        }

        Ok(binding_model::BindGroup {
            raw: desc_set,
            device_id: Stored {
                value: id::Valid(self_id),
                ref_count: self.life_guard.add_ref(),
            },
            layout_id: id::Valid(desc.layout),
            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
            used,
            used_buffer_ranges,
            dynamic_binding_info,
        })
    }
1817 
create_pipeline_layout( &self, self_id: id::DeviceId, desc: &binding_model::PipelineLayoutDescriptor, bgl_guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>, ) -> Result<binding_model::PipelineLayout<B>, binding_model::CreatePipelineLayoutError>1818     fn create_pipeline_layout(
1819         &self,
1820         self_id: id::DeviceId,
1821         desc: &binding_model::PipelineLayoutDescriptor,
1822         bgl_guard: &Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
1823     ) -> Result<binding_model::PipelineLayout<B>, binding_model::CreatePipelineLayoutError> {
1824         use crate::binding_model::CreatePipelineLayoutError as Error;
1825 
1826         let bind_group_layouts_count = desc.bind_group_layouts.len();
1827         let device_max_bind_groups = self.limits.max_bind_groups as usize;
1828         if bind_group_layouts_count > device_max_bind_groups {
1829             return Err(Error::TooManyGroups {
1830                 actual: bind_group_layouts_count,
1831                 max: device_max_bind_groups,
1832             });
1833         }
1834 
1835         if !desc.push_constant_ranges.is_empty() {
1836             self.require_features(wgt::Features::PUSH_CONSTANTS)?;
1837         }
1838 
1839         let mut used_stages = wgt::ShaderStage::empty();
1840         for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
1841             if pc.stages.intersects(used_stages) {
1842                 return Err(Error::MoreThanOnePushConstantRangePerStage {
1843                     index,
1844                     provided: pc.stages,
1845                     intersected: pc.stages & used_stages,
1846                 });
1847             }
1848             used_stages |= pc.stages;
1849 
1850             let device_max_pc_size = self.limits.max_push_constant_size;
1851             if device_max_pc_size < pc.range.end {
1852                 return Err(Error::PushConstantRangeTooLarge {
1853                     index,
1854                     range: pc.range.clone(),
1855                     max: device_max_pc_size,
1856                 });
1857             }
1858 
1859             if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
1860                 return Err(Error::MisalignedPushConstantRange {
1861                     index,
1862                     bound: pc.range.start,
1863                 });
1864             }
1865             if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
1866                 return Err(Error::MisalignedPushConstantRange {
1867                     index,
1868                     bound: pc.range.end,
1869                 });
1870             }
1871         }
1872 
1873         let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
1874 
1875         // validate total resource counts
1876         for &id in desc.bind_group_layouts.iter() {
1877             let bind_group_layout = bgl_guard
1878                 .get(id)
1879                 .map_err(|_| Error::InvalidBindGroupLayout(id))?;
1880             count_validator.merge(&bind_group_layout.count_validator);
1881         }
1882         count_validator
1883             .validate(&self.limits)
1884             .map_err(Error::TooManyBindings)?;
1885 
1886         let descriptor_set_layouts = desc
1887             .bind_group_layouts
1888             .iter()
1889             .map(|&id| &bgl_guard.get(id).unwrap().raw);
1890         let push_constants = desc
1891             .push_constant_ranges
1892             .iter()
1893             .map(|pc| (conv::map_shader_stage_flags(pc.stages), pc.range.clone()));
1894 
1895         let raw = unsafe {
1896             let raw_layout = self
1897                 .raw
1898                 .create_pipeline_layout(descriptor_set_layouts, push_constants)
1899                 .or(Err(DeviceError::OutOfMemory))?;
1900             if let Some(_) = desc.label {
1901                 //TODO-0.6: needs gfx changes published
1902                 //self.raw.set_pipeline_layout_name(&mut raw_layout, label);
1903             }
1904             raw_layout
1905         };
1906 
1907         Ok(binding_model::PipelineLayout {
1908             raw,
1909             device_id: Stored {
1910                 value: id::Valid(self_id),
1911                 ref_count: self.life_guard.add_ref(),
1912             },
1913             life_guard: LifeGuard::new(desc.label.borrow_or_default()),
1914             bind_group_layout_ids: desc
1915                 .bind_group_layouts
1916                 .iter()
1917                 .map(|&id| {
1918                     bgl_guard.get(id).unwrap().multi_ref_count.inc();
1919                     id::Valid(id)
1920                 })
1921                 .collect(),
1922             push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
1923         })
1924     }
1925 
1926     //TODO: refactor this. It's the only method of `Device` that registers new objects
1927     // (the pipeline layout).
derive_pipeline_layout( &self, self_id: id::DeviceId, implicit_context: Option<ImplicitPipelineContext>, mut derived_group_layouts: ArrayVec<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>, bgl_guard: &mut Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>, pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<B>, id::PipelineLayoutId>, ) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError>1928     fn derive_pipeline_layout(
1929         &self,
1930         self_id: id::DeviceId,
1931         implicit_context: Option<ImplicitPipelineContext>,
1932         mut derived_group_layouts: ArrayVec<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>,
1933         bgl_guard: &mut Storage<binding_model::BindGroupLayout<B>, id::BindGroupLayoutId>,
1934         pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<B>, id::PipelineLayoutId>,
1935     ) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError> {
1936         while derived_group_layouts
1937             .last()
1938             .map_or(false, |map| map.is_empty())
1939         {
1940             derived_group_layouts.pop();
1941         }
1942         let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
1943         let group_count = derived_group_layouts.len();
1944         if ids.group_ids.len() < group_count {
1945             log::error!(
1946                 "Not enough bind group IDs ({}) specified for the implicit layout ({})",
1947                 ids.group_ids.len(),
1948                 derived_group_layouts.len()
1949             );
1950             return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _));
1951         }
1952 
1953         for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) {
1954             match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) {
1955                 Some(dedup_id) => {
1956                     *bgl_id = dedup_id;
1957                 }
1958                 None => {
1959                     let bgl = self.create_bind_group_layout(self_id, None, map)?;
1960                     bgl_guard.force_replace(*bgl_id, bgl);
1961                 }
1962             };
1963         }
1964 
1965         let layout_desc = binding_model::PipelineLayoutDescriptor {
1966             label: None,
1967             bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]),
1968             push_constant_ranges: Cow::Borrowed(&[]), //TODO?
1969         };
1970         let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
1971         pipeline_layout_guard.force_replace(ids.root_id, layout);
1972         Ok(ids.root_id)
1973     }
1974 
create_compute_pipeline<G: GlobalIdentityHandlerFactory>( &self, self_id: id::DeviceId, desc: &pipeline::ComputePipelineDescriptor, implicit_context: Option<ImplicitPipelineContext>, hub: &Hub<B, G>, token: &mut Token<Self>, ) -> Result<pipeline::ComputePipeline<B>, pipeline::CreateComputePipelineError>1975     fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
1976         &self,
1977         self_id: id::DeviceId,
1978         desc: &pipeline::ComputePipelineDescriptor,
1979         implicit_context: Option<ImplicitPipelineContext>,
1980         hub: &Hub<B, G>,
1981         token: &mut Token<Self>,
1982     ) -> Result<pipeline::ComputePipeline<B>, pipeline::CreateComputePipelineError> {
1983         //TODO: only lock mutable if the layout is derived
1984         let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
1985         let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
1986 
1987         // This has to be done first, or otherwise the IDs may be pointing to entries
1988         // that are not even in the storage.
1989         if let Some(ref ids) = implicit_context {
1990             pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
1991             for &bgl_id in ids.group_ids.iter() {
1992                 bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
1993             }
1994         }
1995 
1996         if !self
1997             .downlevel
1998             .flags
1999             .contains(wgt::DownlevelFlags::COMPUTE_SHADERS)
2000         {
2001             return Err(pipeline::CreateComputePipelineError::ComputeShadersUnsupported);
2002         }
2003 
2004         let mut derived_group_layouts =
2005             ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
2006 
2007         let io = validation::StageIo::default();
2008         let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
2009 
2010         let entry_point_name = &desc.stage.entry_point;
2011         let shader_module = shader_module_guard
2012             .get(desc.stage.module)
2013             .map_err(|_| validation::StageError::InvalidModule)?;
2014 
2015         let flag = wgt::ShaderStage::COMPUTE;
2016         if let Some(ref interface) = shader_module.interface {
2017             let provided_layouts = match desc.layout {
2018                 Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
2019                     pipeline_layout_guard
2020                         .get(pipeline_layout_id)
2021                         .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
2022                     &*bgl_guard,
2023                 )),
2024                 None => {
2025                     for _ in 0..self.limits.max_bind_groups {
2026                         derived_group_layouts.push(binding_model::BindEntryMap::default());
2027                     }
2028                     None
2029                 }
2030             };
2031             let _ = interface.check_stage(
2032                 provided_layouts.as_ref().map(|p| p.as_slice()),
2033                 &mut derived_group_layouts,
2034                 &entry_point_name,
2035                 flag,
2036                 io,
2037             )?;
2038         } else if desc.layout.is_none() {
2039             return Err(pipeline::ImplicitLayoutError::ReflectionError(flag).into());
2040         }
2041 
2042         let shader = hal::pso::EntryPoint::<B> {
2043             entry: &entry_point_name, // TODO
2044             module: &shader_module.raw,
2045             specialization: hal::pso::Specialization::EMPTY,
2046         };
2047 
2048         // TODO
2049         let flags = hal::pso::PipelineCreationFlags::empty();
2050         // TODO
2051         let parent = hal::pso::BasePipeline::None;
2052 
2053         let pipeline_layout_id = match desc.layout {
2054             Some(id) => id,
2055             None => self.derive_pipeline_layout(
2056                 self_id,
2057                 implicit_context,
2058                 derived_group_layouts,
2059                 &mut *bgl_guard,
2060                 &mut *pipeline_layout_guard,
2061             )?,
2062         };
2063         let layout = pipeline_layout_guard
2064             .get(pipeline_layout_id)
2065             .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
2066 
2067         let pipeline_desc = hal::pso::ComputePipelineDesc {
2068             label: desc.label.as_ref().map(AsRef::as_ref),
2069             shader,
2070             layout: &layout.raw,
2071             flags,
2072             parent,
2073         };
2074 
2075         let raw =
2076             unsafe { self.raw.create_compute_pipeline(&pipeline_desc, None) }.map_err(|err| {
2077                 match err {
2078                     hal::pso::CreationError::OutOfMemory(_) => {
2079                         pipeline::CreateComputePipelineError::Device(DeviceError::OutOfMemory)
2080                     }
2081                     hal::pso::CreationError::ShaderCreationError(_, error) => {
2082                         pipeline::CreateComputePipelineError::Internal(error)
2083                     }
2084                     _ => {
2085                         log::error!("failed to create compute pipeline: {}", err);
2086                         pipeline::CreateComputePipelineError::Device(DeviceError::OutOfMemory)
2087                     }
2088                 }
2089             })?;
2090 
2091         let pipeline = pipeline::ComputePipeline {
2092             raw,
2093             layout_id: Stored {
2094                 value: id::Valid(pipeline_layout_id),
2095                 ref_count: layout.life_guard.add_ref(),
2096             },
2097             device_id: Stored {
2098                 value: id::Valid(self_id),
2099                 ref_count: self.life_guard.add_ref(),
2100             },
2101             life_guard: LifeGuard::new(desc.label.borrow_or_default()),
2102         };
2103         Ok(pipeline)
2104     }
2105 
create_render_pipeline<G: GlobalIdentityHandlerFactory>( &self, self_id: id::DeviceId, desc: &pipeline::RenderPipelineDescriptor, implicit_context: Option<ImplicitPipelineContext>, hub: &Hub<B, G>, token: &mut Token<Self>, ) -> Result<pipeline::RenderPipeline<B>, pipeline::CreateRenderPipelineError>2106     fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
2107         &self,
2108         self_id: id::DeviceId,
2109         desc: &pipeline::RenderPipelineDescriptor,
2110         implicit_context: Option<ImplicitPipelineContext>,
2111         hub: &Hub<B, G>,
2112         token: &mut Token<Self>,
2113     ) -> Result<pipeline::RenderPipeline<B>, pipeline::CreateRenderPipelineError> {
2114         //TODO: only lock mutable if the layout is derived
2115         let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
2116         let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
2117 
2118         // This has to be done first, or otherwise the IDs may be pointing to entries
2119         // that are not even in the storage.
2120         if let Some(ref ids) = implicit_context {
2121             pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
2122             for &bgl_id in ids.group_ids.iter() {
2123                 bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
2124             }
2125         }
2126 
2127         let mut derived_group_layouts =
2128             ArrayVec::<[binding_model::BindEntryMap; MAX_BIND_GROUPS]>::new();
2129 
2130         let color_states = desc
2131             .fragment
2132             .as_ref()
2133             .map_or(&[][..], |fragment| &fragment.targets);
2134         let depth_stencil_state = desc.depth_stencil.as_ref();
2135         let rasterizer =
2136             conv::map_primitive_state_to_rasterizer(&desc.primitive, depth_stencil_state);
2137 
2138         let mut io = validation::StageIo::default();
2139         let mut validated_stages = wgt::ShaderStage::empty();
2140 
2141         let desc_vbs = &desc.vertex.buffers;
2142         let mut vertex_strides = Vec::with_capacity(desc_vbs.len());
2143         let mut vertex_buffers = Vec::with_capacity(desc_vbs.len());
2144         let mut attributes = Vec::new();
2145         for (i, vb_state) in desc_vbs.iter().enumerate() {
2146             vertex_strides
2147                 .alloc()
2148                 .init((vb_state.array_stride, vb_state.step_mode));
2149             if vb_state.attributes.is_empty() {
2150                 continue;
2151             }
2152             if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
2153                 return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
2154                     index: i as u32,
2155                     given: vb_state.array_stride as u32,
2156                     limit: self.limits.max_vertex_buffer_array_stride,
2157                 });
2158             }
2159             if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
2160                 return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
2161                     index: i as u32,
2162                     stride: vb_state.array_stride,
2163                 });
2164             }
2165             vertex_buffers.alloc().init(hal::pso::VertexBufferDesc {
2166                 binding: i as u32,
2167                 stride: vb_state.array_stride as u32,
2168                 rate: match vb_state.step_mode {
2169                     InputStepMode::Vertex => hal::pso::VertexInputRate::Vertex,
2170                     InputStepMode::Instance => hal::pso::VertexInputRate::Instance(1),
2171                 },
2172             });
2173             let desc_atts = &vb_state.attributes;
2174             for attribute in desc_atts.iter() {
2175                 if attribute.offset >= 0x10000000 {
2176                     return Err(
2177                         pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
2178                             location: attribute.shader_location,
2179                             offset: attribute.offset,
2180                         },
2181                     );
2182                 }
2183 
2184                 if let wgt::VertexFormat::Float64
2185                 | wgt::VertexFormat::Float64x2
2186                 | wgt::VertexFormat::Float64x3
2187                 | wgt::VertexFormat::Float64x4 = attribute.format
2188                 {
2189                     self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
2190                 }
2191 
2192                 attributes.alloc().init(hal::pso::AttributeDesc {
2193                     location: attribute.shader_location,
2194                     binding: i as u32,
2195                     element: hal::pso::Element {
2196                         format: conv::map_vertex_format(attribute.format),
2197                         offset: attribute.offset as u32,
2198                     },
2199                 });
2200                 io.insert(
2201                     attribute.shader_location,
2202                     validation::InterfaceVar::vertex_attribute(attribute.format),
2203                 );
2204             }
2205         }
2206 
2207         if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
2208             return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
2209                 given: vertex_buffers.len() as u32,
2210                 limit: self.limits.max_vertex_buffers,
2211             });
2212         }
2213         if attributes.len() > self.limits.max_vertex_attributes as usize {
2214             return Err(
2215                 pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
2216                     given: attributes.len() as u32,
2217                     limit: self.limits.max_vertex_attributes,
2218                 },
2219             );
2220         }
2221 
2222         if desc.primitive.strip_index_format.is_some()
2223             && desc.primitive.topology != wgt::PrimitiveTopology::LineStrip
2224             && desc.primitive.topology != wgt::PrimitiveTopology::TriangleStrip
2225         {
2226             return Err(
2227                 pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
2228                     strip_index_format: desc.primitive.strip_index_format,
2229                     topology: desc.primitive.topology,
2230                 },
2231             );
2232         }
2233 
2234         if desc.primitive.clamp_depth {
2235             self.require_features(wgt::Features::DEPTH_CLAMPING)?;
2236         }
2237         if desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
2238             self.require_features(wgt::Features::NON_FILL_POLYGON_MODE)?;
2239         }
2240 
2241         if desc.primitive.conservative {
2242             self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
2243         }
2244 
2245         if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
2246             return Err(
2247                 pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
2248             );
2249         }
2250 
2251         let input_assembler = conv::map_primitive_state_to_input_assembler(&desc.primitive);
2252 
2253         let mut blender = hal::pso::BlendDesc {
2254             logic_op: None,
2255             targets: Vec::with_capacity(color_states.len()),
2256         };
2257         for (i, cs) in color_states.iter().enumerate() {
2258             let bt = conv::map_color_target_state(cs)
2259                 .map_err(|error| pipeline::CreateRenderPipelineError::ColorState(i as u8, error))?;
2260             blender.targets.push(bt);
2261         }
2262 
2263         let depth_stencil = depth_stencil_state
2264             .map(conv::map_depth_stencil_state)
2265             .unwrap_or_default();
2266 
2267         let baked_states = hal::pso::BakedStates {
2268             viewport: None,
2269             scissor: None,
2270             blend_constants: None,
2271             depth_bounds: None,
2272         };
2273 
2274         if desc.layout.is_none() {
2275             for _ in 0..self.limits.max_bind_groups {
2276                 derived_group_layouts.push(binding_model::BindEntryMap::default());
2277             }
2278         }
2279 
2280         let samples = {
2281             let sc = desc.multisample.count;
2282             if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
2283                 return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
2284             }
2285             sc as u8
2286         };
2287         let multisampling = if samples == 1 {
2288             None
2289         } else {
2290             Some(conv::map_multisample_state(&desc.multisample))
2291         };
2292 
2293         let rp_key = RenderPassKey {
2294             colors: color_states
2295                 .iter()
2296                 .map(|state| {
2297                     let at = hal::pass::Attachment {
2298                         format: Some(conv::map_texture_format(
2299                             state.format,
2300                             self.private_features,
2301                         )),
2302                         samples,
2303                         ops: hal::pass::AttachmentOps::PRESERVE,
2304                         stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
2305                         layouts: hal::image::Layout::General..hal::image::Layout::General,
2306                     };
2307                     (at, hal::image::Layout::ColorAttachmentOptimal)
2308                 })
2309                 .collect(),
2310             // We can ignore the resolves as the vulkan specs says:
2311             // As an additional special case, if two render passes have a single subpass,
2312             // they are compatible even if they have different resolve attachment references
2313             // or depth/stencil resolve modes but satisfy the other compatibility conditions.
2314             resolves: ArrayVec::new(),
2315             depth_stencil: depth_stencil_state.map(|state| {
2316                 let at = hal::pass::Attachment {
2317                     format: Some(conv::map_texture_format(
2318                         state.format,
2319                         self.private_features,
2320                     )),
2321                     samples,
2322                     ops: hal::pass::AttachmentOps::PRESERVE,
2323                     stencil_ops: hal::pass::AttachmentOps::PRESERVE,
2324                     layouts: hal::image::Layout::General..hal::image::Layout::General,
2325                 };
2326                 (at, hal::image::Layout::DepthStencilAttachmentOptimal)
2327             }),
2328         };
2329 
2330         let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
2331 
2332         let vertex = {
2333             let stage = &desc.vertex.stage;
2334             let flag = wgt::ShaderStage::VERTEX;
2335 
2336             let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
2337                 pipeline::CreateRenderPipelineError::Stage {
2338                     stage: flag,
2339                     error: validation::StageError::InvalidModule,
2340                 }
2341             })?;
2342 
2343             if let Some(ref interface) = shader_module.interface {
2344                 let provided_layouts = match desc.layout {
2345                     Some(pipeline_layout_id) => {
2346                         let pipeline_layout = pipeline_layout_guard
2347                             .get(pipeline_layout_id)
2348                             .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
2349                         Some(Device::get_introspection_bind_group_layouts(
2350                             pipeline_layout,
2351                             &*bgl_guard,
2352                         ))
2353                     }
2354                     None => None,
2355                 };
2356 
2357                 io = interface
2358                     .check_stage(
2359                         provided_layouts.as_ref().map(|p| p.as_slice()),
2360                         &mut derived_group_layouts,
2361                         &stage.entry_point,
2362                         flag,
2363                         io,
2364                     )
2365                     .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
2366                         stage: flag,
2367                         error,
2368                     })?;
2369                 validated_stages |= flag;
2370             }
2371 
2372             hal::pso::EntryPoint::<B> {
2373                 entry: &stage.entry_point,
2374                 module: &shader_module.raw,
2375                 specialization: hal::pso::Specialization::EMPTY,
2376             }
2377         };
2378 
2379         let fragment = match desc.fragment {
2380             Some(ref fragment) => {
2381                 let entry_point_name = &fragment.stage.entry_point;
2382                 let flag = wgt::ShaderStage::FRAGMENT;
2383 
2384                 let shader_module =
2385                     shader_module_guard
2386                         .get(fragment.stage.module)
2387                         .map_err(|_| pipeline::CreateRenderPipelineError::Stage {
2388                             stage: flag,
2389                             error: validation::StageError::InvalidModule,
2390                         })?;
2391 
2392                 let provided_layouts = match desc.layout {
2393                     Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
2394                         pipeline_layout_guard
2395                             .get(pipeline_layout_id)
2396                             .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
2397                         &*bgl_guard,
2398                     )),
2399                     None => None,
2400                 };
2401 
2402                 if validated_stages == wgt::ShaderStage::VERTEX {
2403                     if let Some(ref interface) = shader_module.interface {
2404                         io = interface
2405                             .check_stage(
2406                                 provided_layouts.as_ref().map(|p| p.as_slice()),
2407                                 &mut derived_group_layouts,
2408                                 &entry_point_name,
2409                                 flag,
2410                                 io,
2411                             )
2412                             .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
2413                                 stage: flag,
2414                                 error,
2415                             })?;
2416                         validated_stages |= flag;
2417                     }
2418                 }
2419 
2420                 Some(hal::pso::EntryPoint::<B> {
2421                     entry: &entry_point_name,
2422                     module: &shader_module.raw,
2423                     specialization: hal::pso::Specialization::EMPTY,
2424                 })
2425             }
2426             None => None,
2427         };
2428 
2429         if validated_stages.contains(wgt::ShaderStage::FRAGMENT) {
2430             for (i, state) in color_states.iter().enumerate() {
2431                 match io.get(&(i as wgt::ShaderLocation)) {
2432                     Some(ref output) => {
2433                         validation::check_texture_format(state.format, &output.ty).map_err(
2434                             |pipeline| {
2435                                 pipeline::CreateRenderPipelineError::ColorState(
2436                                     i as u8,
2437                                     pipeline::ColorStateError::IncompatibleFormat {
2438                                         pipeline,
2439                                         shader: output.ty,
2440                                     },
2441                                 )
2442                             },
2443                         )?;
2444                     }
2445                     None if state.write_mask.is_empty() => {}
2446                     None => {
2447                         log::warn!("Missing fragment output[{}], expected {:?}", i, state,);
2448                         return Err(pipeline::CreateRenderPipelineError::ColorState(
2449                             i as u8,
2450                             pipeline::ColorStateError::Missing,
2451                         ));
2452                     }
2453                 }
2454             }
2455         }
2456         let last_stage = match desc.fragment {
2457             Some(_) => wgt::ShaderStage::FRAGMENT,
2458             None => wgt::ShaderStage::VERTEX,
2459         };
2460         if desc.layout.is_none() && !validated_stages.contains(last_stage) {
2461             return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
2462         }
2463 
2464         let primitive_assembler = hal::pso::PrimitiveAssemblerDesc::Vertex {
2465             buffers: &vertex_buffers,
2466             attributes: &attributes,
2467             input_assembler,
2468             vertex,
2469             tessellation: None,
2470             geometry: None,
2471         };
2472 
2473         // TODO
2474         let flags = hal::pso::PipelineCreationFlags::empty();
2475         // TODO
2476         let parent = hal::pso::BasePipeline::None;
2477 
2478         let pipeline_layout_id = match desc.layout {
2479             Some(id) => id,
2480             None => self.derive_pipeline_layout(
2481                 self_id,
2482                 implicit_context,
2483                 derived_group_layouts,
2484                 &mut *bgl_guard,
2485                 &mut *pipeline_layout_guard,
2486             )?,
2487         };
2488         let layout = pipeline_layout_guard
2489             .get(pipeline_layout_id)
2490             .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
2491 
2492         let mut rp_lock = self.render_passes.lock();
2493         let pipeline_desc = hal::pso::GraphicsPipelineDesc {
2494             label: desc.label.as_ref().map(AsRef::as_ref),
2495             primitive_assembler,
2496             rasterizer,
2497             fragment,
2498             blender,
2499             depth_stencil,
2500             multisampling,
2501             baked_states,
2502             layout: &layout.raw,
2503             subpass: hal::pass::Subpass {
2504                 index: 0,
2505                 main_pass: match rp_lock.render_passes.entry(rp_key) {
2506                     Entry::Occupied(e) => e.into_mut(),
2507                     Entry::Vacant(e) => {
2508                         let pass = self
2509                             .create_compatible_render_pass(e.key())
2510                             .or(Err(DeviceError::OutOfMemory))?;
2511                         e.insert(pass)
2512                     }
2513                 },
2514             },
2515             flags,
2516             parent,
2517         };
2518         // TODO: cache
2519         let raw =
2520             unsafe { self.raw.create_graphics_pipeline(&pipeline_desc, None) }.map_err(|err| {
2521                 match err {
2522                     hal::pso::CreationError::OutOfMemory(_) => {
2523                         pipeline::CreateRenderPipelineError::Device(DeviceError::OutOfMemory)
2524                     }
2525                     hal::pso::CreationError::ShaderCreationError(stage, error) => {
2526                         pipeline::CreateRenderPipelineError::Internal {
2527                             stage: conv::map_hal_flags_to_shader_stage(stage),
2528                             error,
2529                         }
2530                     }
2531                     _ => {
2532                         log::error!("failed to create graphics pipeline: {}", err);
2533                         pipeline::CreateRenderPipelineError::Device(DeviceError::OutOfMemory)
2534                     }
2535                 }
2536             })?;
2537 
2538         let pass_context = RenderPassContext {
2539             attachments: AttachmentData {
2540                 colors: color_states.iter().map(|state| state.format).collect(),
2541                 resolves: ArrayVec::new(),
2542                 depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
2543             },
2544             sample_count: samples,
2545         };
2546 
2547         let mut flags = pipeline::PipelineFlags::empty();
2548         for state in color_states.iter() {
2549             if let Some(ref bs) = state.blend {
2550                 if bs.color.uses_constant() | bs.alpha.uses_constant() {
2551                     flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
2552                 }
2553             }
2554         }
2555         if let Some(ds) = depth_stencil_state.as_ref() {
2556             if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
2557                 flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
2558             }
2559             if !ds.is_read_only() {
2560                 flags |= pipeline::PipelineFlags::WRITES_DEPTH_STENCIL;
2561             }
2562         }
2563 
2564         let pipeline = pipeline::RenderPipeline {
2565             raw,
2566             layout_id: Stored {
2567                 value: id::Valid(pipeline_layout_id),
2568                 ref_count: layout.life_guard.add_ref(),
2569             },
2570             device_id: Stored {
2571                 value: id::Valid(self_id),
2572                 ref_count: self.life_guard.add_ref(),
2573             },
2574             pass_context,
2575             flags,
2576             strip_index_format: desc.primitive.strip_index_format,
2577             vertex_strides,
2578             life_guard: LifeGuard::new(desc.label.borrow_or_default()),
2579         };
2580         Ok(pipeline)
2581     }
2582 
wait_for_submit( &self, submission_index: SubmissionIndex, token: &mut Token<Self>, ) -> Result<(), WaitIdleError>2583     fn wait_for_submit(
2584         &self,
2585         submission_index: SubmissionIndex,
2586         token: &mut Token<Self>,
2587     ) -> Result<(), WaitIdleError> {
2588         if self.last_completed_submission_index() <= submission_index {
2589             log::info!("Waiting for submission {:?}", submission_index);
2590             self.lock_life(token)
2591                 .triage_submissions(&self.raw, true)
2592                 .map(|_| ())
2593         } else {
2594             Ok(())
2595         }
2596     }
2597 
create_query_set( &self, self_id: id::DeviceId, desc: &wgt::QuerySetDescriptor, ) -> Result<resource::QuerySet<B>, resource::CreateQuerySetError>2598     fn create_query_set(
2599         &self,
2600         self_id: id::DeviceId,
2601         desc: &wgt::QuerySetDescriptor,
2602     ) -> Result<resource::QuerySet<B>, resource::CreateQuerySetError> {
2603         use resource::CreateQuerySetError as Error;
2604 
2605         match desc.ty {
2606             wgt::QueryType::Timestamp => {
2607                 self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
2608             }
2609             wgt::QueryType::PipelineStatistics(..) => {
2610                 self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
2611             }
2612         }
2613 
2614         if desc.count == 0 {
2615             return Err(Error::ZeroCount);
2616         }
2617 
2618         if desc.count >= wgt::QUERY_SET_MAX_QUERIES {
2619             return Err(Error::TooManyQueries {
2620                 count: desc.count,
2621                 maximum: wgt::QUERY_SET_MAX_QUERIES,
2622             });
2623         }
2624 
2625         let (hal_type, elements) = conv::map_query_type(&desc.ty);
2626 
2627         Ok(resource::QuerySet {
2628             raw: unsafe { self.raw.create_query_pool(hal_type, desc.count).unwrap() },
2629             device_id: Stored {
2630                 value: id::Valid(self_id),
2631                 ref_count: self.life_guard.add_ref(),
2632             },
2633             life_guard: LifeGuard::new(""),
2634             desc: desc.clone(),
2635             elements,
2636         })
2637     }
2638 }
2639 
impl<B: hal::Backend> Device<B> {
    /// Return the bind group's descriptor set to the descriptor allocator.
    pub(crate) fn destroy_bind_group(&self, bind_group: binding_model::BindGroup<B>) {
        self.desc_allocator
            .lock()
            .free(&self.raw, iter::once(bind_group.raw));
    }

    /// Free the buffer's memory block and destroy its raw handle, if any.
    /// A `None` raw means the buffer was already destroyed or never created.
    pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<B>) {
        if let Some((raw, memory)) = buffer.raw {
            unsafe {
                // Free the memory before destroying the handle it backs.
                self.mem_allocator.lock().free(&self.raw, memory);
                self.raw.destroy_buffer(raw);
            }
        }
    }

    /// Free the texture's memory block and destroy its raw image, if any.
    pub(crate) fn destroy_texture(&self, texture: resource::Texture<B>) {
        if let Some((raw, memory)) = texture.raw {
            unsafe {
                // Free the memory before destroying the handle it backs.
                self.mem_allocator.lock().free(&self.raw, memory);
                self.raw.destroy_image(raw);
            }
        }
    }

    /// Wait for idle and remove resources that we can, before we die.
    pub(crate) fn prepare_to_die(&mut self) {
        let mut life_tracker = self.life_tracker.lock();
        // Blocking wait (second arg `true`) for outstanding submissions;
        // failure here is logged but cleanup still proceeds best-effort.
        if let Err(error) = life_tracker.triage_submissions(&self.raw, true) {
            log::error!("failed to triage submissions: {}", error);
        }
        life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
    }

    /// Consume the device and destroy all backend objects it still owns.
    /// The teardown order matters: pending writes and command allocators are
    /// disposed before the descriptor/memory allocators are drained, and the
    /// cached render passes and framebuffers are destroyed last.
    pub(crate) fn dispose(self) {
        let mut desc_alloc = self.desc_allocator.into_inner();
        let mut mem_alloc = self.mem_allocator.into_inner();
        self.pending_writes
            .dispose(&self.raw, &self.cmd_allocator, &mut mem_alloc);
        self.cmd_allocator.destroy(&self.raw);
        unsafe {
            desc_alloc.cleanup(&self.raw);
            mem_alloc.clear(&self.raw);
            let rps = self.render_passes.into_inner();
            for (_, rp) in rps.render_passes {
                self.raw.destroy_render_pass(rp);
            }
            for (_, fbo) in rps.framebuffers {
                self.raw.destroy_framebuffer(fbo);
            }
        }
    }
}
2693 
impl<B: hal::Backend> crate::hub::Resource for Device<B> {
    const TYPE: &'static str = "Device";

    /// Expose the device's life guard so the hub can track its ref-count.
    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
2701 
/// Error returned when an operation refers to a device ID that does not
/// resolve to a live device.
#[derive(Clone, Debug, Error)]
#[error("device is invalid")]
pub struct InvalidDevice;
2705 
/// Fatal device-level failures shared by many operations.
#[derive(Clone, Debug, Error)]
pub enum DeviceError {
    /// The parent device ID does not resolve to a live device.
    #[error("parent device is invalid")]
    Invalid,
    /// The parent device has been lost (e.g. the GPU was removed or reset).
    #[error("parent device is lost")]
    Lost,
    /// A device or host allocation failed.
    #[error("not enough memory left")]
    OutOfMemory,
}
2715 
2716 impl From<hal::device::WaitError> for DeviceError {
from(err: hal::device::WaitError) -> Self2717     fn from(err: hal::device::WaitError) -> Self {
2718         match err {
2719             hal::device::WaitError::OutOfMemory(_) => Self::OutOfMemory,
2720             hal::device::WaitError::DeviceLost(_) => Self::Lost,
2721         }
2722     }
2723 }
2724 
2725 impl From<gpu_alloc::MapError> for DeviceError {
from(err: gpu_alloc::MapError) -> Self2726     fn from(err: gpu_alloc::MapError) -> Self {
2727         match err {
2728             gpu_alloc::MapError::OutOfDeviceMemory | gpu_alloc::MapError::OutOfHostMemory => {
2729                 DeviceError::OutOfMemory
2730             }
2731             _ => panic!("failed to map buffer: {}", err),
2732         }
2733     }
2734 }
2735 
2736 impl DeviceError {
from_bind(err: hal::device::BindError) -> Self2737     fn from_bind(err: hal::device::BindError) -> Self {
2738         match err {
2739             hal::device::BindError::OutOfMemory(_) => Self::OutOfMemory,
2740             _ => panic!("failed to bind memory: {}", err),
2741         }
2742     }
2743 }
2744 
/// Error raised when an operation requires `wgt::Features` that were not
/// enabled on the device.
#[derive(Clone, Debug, Error)]
#[error("Features {0:?} are required but not enabled on the device")]
pub struct MissingFeatures(pub wgt::Features);
2748 
/// Resolved ids for an implicitly created pipeline layout and its bind
/// group layouts.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ImplicitPipelineContext {
    /// Id reserved for the implicit pipeline layout.
    pub root_id: id::PipelineLayoutId,
    /// Ids reserved for the implicit bind group layouts, one per group.
    pub group_ids: ArrayVec<[id::BindGroupLayoutId; MAX_BIND_GROUPS]>,
}
2756 
/// Caller-provided identity inputs from which the ids of an implicit
/// pipeline layout and its bind group layouts are reserved.
pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
    /// Identity input for the implicit pipeline layout id.
    pub root_id: Input<G, id::PipelineLayoutId>,
    /// Identity inputs for the implicit bind group layout ids.
    pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
}
2761 
2762 impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
prepare<B: hal::Backend>(self, hub: &Hub<B, G>) -> ImplicitPipelineContext2763     fn prepare<B: hal::Backend>(self, hub: &Hub<B, G>) -> ImplicitPipelineContext {
2764         ImplicitPipelineContext {
2765             root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
2766             group_ids: self
2767                 .group_ids
2768                 .iter()
2769                 .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id())
2770                 .collect(),
2771         }
2772     }
2773 }
2774 
2775 impl<G: GlobalIdentityHandlerFactory> Global<G> {
adapter_get_swap_chain_preferred_format<B: GfxBackend>( &self, adapter_id: id::AdapterId, surface_id: id::SurfaceId, ) -> Result<TextureFormat, instance::GetSwapChainPreferredFormatError>2776     pub fn adapter_get_swap_chain_preferred_format<B: GfxBackend>(
2777         &self,
2778         adapter_id: id::AdapterId,
2779         surface_id: id::SurfaceId,
2780     ) -> Result<TextureFormat, instance::GetSwapChainPreferredFormatError> {
2781         let hub = B::hub(self);
2782         let mut token = Token::root();
2783 
2784         let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
2785         let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
2786         let adapter = adapter_guard
2787             .get(adapter_id)
2788             .map_err(|_| instance::GetSwapChainPreferredFormatError::InvalidAdapter)?;
2789         let surface = surface_guard
2790             .get_mut(surface_id)
2791             .map_err(|_| instance::GetSwapChainPreferredFormatError::InvalidSurface)?;
2792 
2793         adapter.get_swap_chain_preferred_format(surface)
2794     }
2795 
device_features<B: GfxBackend>( &self, device_id: id::DeviceId, ) -> Result<wgt::Features, InvalidDevice>2796     pub fn device_features<B: GfxBackend>(
2797         &self,
2798         device_id: id::DeviceId,
2799     ) -> Result<wgt::Features, InvalidDevice> {
2800         let hub = B::hub(self);
2801         let mut token = Token::root();
2802         let (device_guard, _) = hub.devices.read(&mut token);
2803         let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
2804 
2805         Ok(device.features)
2806     }
2807 
device_limits<B: GfxBackend>( &self, device_id: id::DeviceId, ) -> Result<wgt::Limits, InvalidDevice>2808     pub fn device_limits<B: GfxBackend>(
2809         &self,
2810         device_id: id::DeviceId,
2811     ) -> Result<wgt::Limits, InvalidDevice> {
2812         let hub = B::hub(self);
2813         let mut token = Token::root();
2814         let (device_guard, _) = hub.devices.read(&mut token);
2815         let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
2816 
2817         Ok(device.limits.clone())
2818     }
2819 
device_downlevel_properties<B: GfxBackend>( &self, device_id: id::DeviceId, ) -> Result<wgt::DownlevelProperties, InvalidDevice>2820     pub fn device_downlevel_properties<B: GfxBackend>(
2821         &self,
2822         device_id: id::DeviceId,
2823     ) -> Result<wgt::DownlevelProperties, InvalidDevice> {
2824         let hub = B::hub(self);
2825         let mut token = Token::root();
2826         let (device_guard, _) = hub.devices.read(&mut token);
2827         let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
2828 
2829         Ok(device.downlevel)
2830     }
2831 
    /// Create a buffer on `device_id` described by `desc`.
    ///
    /// Returns the new buffer id plus an optional error. On failure the id
    /// still refers to an "error" slot in the hub, so callers always get a
    /// usable id (wgpu-core's fallible-id convention).
    pub fn device_create_buffer<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &resource::BufferDescriptor,
        id_in: Input<G, id::BufferId>,
    ) -> (id::BufferId, Option<resource::CreateBufferError>) {
        profiling::scope!("create_buffer", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.buffers.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        // Single-pass `loop` used as an error scope: each failure `break`s
        // with the error value; the success path `return`s early.
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                // The trace records `mapped_at_creation: false` and, when the
                // buffer itself isn't MAP_WRITE, adds COPY_DST —
                // NOTE(review): presumably so replay can upload the initial
                // contents with a copy instead of a mapping; confirm.
                let mut desc = desc.clone();
                let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
                if mapped_at_creation && !desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
                    desc.usage |= wgt::BufferUsage::COPY_DST;
                }
                trace
                    .lock()
                    .add(trace::Action::CreateBuffer(fid.id(), desc));
            }

            let mut buffer = match device.create_buffer(device_id, desc, false) {
                Ok(buffer) => buffer,
                Err(e) => break e,
            };
            let ref_count = buffer.life_guard.add_ref();

            let buffer_use = if !desc.mapped_at_creation {
                resource::BufferUse::EMPTY
            } else if desc.usage.contains(wgt::BufferUsage::MAP_WRITE) {
                // buffer is mappable, so we are just doing that at start
                let map_size = buffer.size;
                let ptr = match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
                    Ok(ptr) => ptr,
                    Err(e) => {
                        // Mapping failed: hand the freshly created buffer to
                        // the life tracker. `!0` (max submission index) —
                        // NOTE(review): appears to defer the free until all
                        // submissions complete; confirm.
                        let (raw, memory) = buffer.raw.unwrap();
                        device.lock_life(&mut token).schedule_resource_destruction(
                            queue::TempResource::Buffer(raw),
                            memory,
                            !0,
                        );
                        break e.into();
                    }
                };
                buffer.map_state = resource::BufferMapState::Active {
                    ptr,
                    sub_range: hal::buffer::SubRange::WHOLE,
                    host: HostMap::Write,
                };
                resource::BufferUse::MAP_WRITE
            } else {
                // buffer needs staging area for initialization only
                let stage_desc = wgt::BufferDescriptor {
                    label: Some(Cow::Borrowed("<init_buffer>")),
                    size: desc.size,
                    usage: wgt::BufferUsage::MAP_WRITE | wgt::BufferUsage::COPY_SRC,
                    mapped_at_creation: false,
                };
                let mut stage = match device.create_buffer(device_id, &stage_desc, true) {
                    Ok(stage) => stage,
                    Err(e) => {
                        // Staging creation failed: schedule the main buffer
                        // for destruction before bailing out.
                        let (raw, memory) = buffer.raw.unwrap();
                        device.lock_life(&mut token).schedule_resource_destruction(
                            queue::TempResource::Buffer(raw),
                            memory,
                            !0,
                        );
                        break e;
                    }
                };
                let (stage_buffer, mut stage_memory) = stage.raw.unwrap();
                let ptr = match stage_memory.map(&device.raw, 0, stage.size) {
                    Ok(ptr) => ptr,
                    Err(e) => {
                        // Mapping the staging buffer failed: schedule both
                        // the main buffer and the staging buffer for
                        // destruction.
                        let (raw, memory) = buffer.raw.unwrap();
                        let mut life_lock = device.lock_life(&mut token);
                        life_lock.schedule_resource_destruction(
                            queue::TempResource::Buffer(raw),
                            memory,
                            !0,
                        );
                        life_lock.schedule_resource_destruction(
                            queue::TempResource::Buffer(stage_buffer),
                            stage_memory,
                            !0,
                        );
                        break e.into();
                    }
                };

                // Zero initialize memory and then mark both staging and buffer as initialized
                // (it's guaranteed that this is the case by the time the buffer is usable)
                unsafe { ptr::write_bytes(ptr.as_ptr(), 0, buffer.size as usize) };
                buffer.initialization_status.clear(0..buffer.size);
                stage.initialization_status.clear(0..buffer.size);

                buffer.map_state = resource::BufferMapState::Init {
                    ptr,
                    needs_flush: !stage_memory.is_coherent(),
                    stage_buffer,
                    stage_memory,
                };
                resource::BufferUse::COPY_DST
            };

            let id = fid.assign(buffer, &mut token);
            log::info!("Created buffer {:?} with {:?}", id, desc);

            // Register the buffer with the device's usage tracker under its
            // initial usage state.
            device
                .trackers
                .lock()
                .buffers
                .init(id, ref_count, BufferState::with_usage(buffer_use))
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
2961 
2962     #[cfg(feature = "replay")]
device_wait_for_buffer<B: GfxBackend>( &self, device_id: id::DeviceId, buffer_id: id::BufferId, ) -> Result<(), WaitIdleError>2963     pub fn device_wait_for_buffer<B: GfxBackend>(
2964         &self,
2965         device_id: id::DeviceId,
2966         buffer_id: id::BufferId,
2967     ) -> Result<(), WaitIdleError> {
2968         let hub = B::hub(self);
2969         let mut token = Token::root();
2970         let (device_guard, mut token) = hub.devices.read(&mut token);
2971         let last_submission = {
2972             let (buffer_guard, _) = hub.buffers.write(&mut token);
2973             match buffer_guard.get(buffer_id) {
2974                 Ok(buffer) => buffer.life_guard.submission_index.load(Ordering::Acquire),
2975                 Err(_) => return Ok(()),
2976             }
2977         };
2978 
2979         device_guard
2980             .get(device_id)
2981             .map_err(|_| DeviceError::Invalid)?
2982             .wait_for_submit(last_submission, &mut token)
2983     }
2984 
device_set_buffer_sub_data<B: GfxBackend>( &self, device_id: id::DeviceId, buffer_id: id::BufferId, offset: BufferAddress, data: &[u8], ) -> Result<(), resource::BufferAccessError>2985     pub fn device_set_buffer_sub_data<B: GfxBackend>(
2986         &self,
2987         device_id: id::DeviceId,
2988         buffer_id: id::BufferId,
2989         offset: BufferAddress,
2990         data: &[u8],
2991     ) -> Result<(), resource::BufferAccessError> {
2992         profiling::scope!("set_buffer_sub_data", "Device");
2993 
2994         let hub = B::hub(self);
2995         let mut token = Token::root();
2996 
2997         let (device_guard, mut token) = hub.devices.read(&mut token);
2998         let (mut buffer_guard, _) = hub.buffers.write(&mut token);
2999         let device = device_guard
3000             .get(device_id)
3001             .map_err(|_| DeviceError::Invalid)?;
3002         let buffer = buffer_guard
3003             .get_mut(buffer_id)
3004             .map_err(|_| resource::BufferAccessError::Invalid)?;
3005         check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_WRITE)?;
3006         //assert!(buffer isn't used by the GPU);
3007 
3008         #[cfg(feature = "trace")]
3009         if let Some(ref trace) = device.trace {
3010             let mut trace = trace.lock();
3011             let data_path = trace.make_binary("bin", data);
3012             trace.add(trace::Action::WriteBuffer {
3013                 id: buffer_id,
3014                 data: data_path,
3015                 range: offset..offset + data.len() as BufferAddress,
3016                 queued: false,
3017             });
3018         }
3019 
3020         buffer
3021             .raw
3022             .as_mut()
3023             .unwrap()
3024             .1
3025             .write_bytes(&device.raw, offset, data)?;
3026 
3027         Ok(())
3028     }
3029 
device_get_buffer_sub_data<B: GfxBackend>( &self, device_id: id::DeviceId, buffer_id: id::BufferId, offset: BufferAddress, data: &mut [u8], ) -> Result<(), resource::BufferAccessError>3030     pub fn device_get_buffer_sub_data<B: GfxBackend>(
3031         &self,
3032         device_id: id::DeviceId,
3033         buffer_id: id::BufferId,
3034         offset: BufferAddress,
3035         data: &mut [u8],
3036     ) -> Result<(), resource::BufferAccessError> {
3037         profiling::scope!("get_buffer_sub_data", "Device");
3038 
3039         let hub = B::hub(self);
3040         let mut token = Token::root();
3041 
3042         let (device_guard, mut token) = hub.devices.read(&mut token);
3043         let (mut buffer_guard, _) = hub.buffers.write(&mut token);
3044         let device = device_guard
3045             .get(device_id)
3046             .map_err(|_| DeviceError::Invalid)?;
3047         let buffer = buffer_guard
3048             .get_mut(buffer_id)
3049             .map_err(|_| resource::BufferAccessError::Invalid)?;
3050         check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
3051         //assert!(buffer isn't used by the GPU);
3052 
3053         buffer
3054             .raw
3055             .as_mut()
3056             .unwrap()
3057             .1
3058             .read_bytes(&device.raw, offset, data)?;
3059 
3060         Ok(())
3061     }
3062 
buffer_label<B: GfxBackend>(&self, id: id::BufferId) -> String3063     pub fn buffer_label<B: GfxBackend>(&self, id: id::BufferId) -> String {
3064         B::hub(self).buffers.label_for_resource(id)
3065     }
3066 
    /// Destroy the GPU resources of `buffer_id` immediately, without
    /// waiting for the id itself to be dropped.
    ///
    /// The raw buffer and its memory are either parked with the device's
    /// pending writes (when a queued write still targets the buffer) or
    /// handed to the life tracker to be freed after the buffer's last
    /// submission completes.
    pub fn buffer_destroy<B: GfxBackend>(
        &self,
        buffer_id: id::BufferId,
    ) -> Result<(), resource::DestroyError> {
        profiling::scope!("destroy", "Buffer");

        let hub = B::hub(self);
        let mut token = Token::root();

        //TODO: lock pending writes separately, keep the device read-only
        let (mut device_guard, mut token) = hub.devices.write(&mut token);

        log::info!("Buffer {:?} is destroyed", buffer_id);
        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
        let buffer = buffer_guard
            .get_mut(buffer_id)
            .map_err(|_| resource::DestroyError::Invalid)?;

        let device = &mut device_guard[buffer.device_id.value];

        #[cfg(feature = "trace")]
        if let Some(ref trace) = device.trace {
            trace.lock().add(trace::Action::FreeBuffer(buffer_id));
        }

        // Taking `raw` marks the buffer as destroyed; a second destroy call
        // hits `AlreadyDestroyed`.
        let (raw, memory) = buffer
            .raw
            .take()
            .ok_or(resource::DestroyError::AlreadyDestroyed)?;
        let temp = queue::TempResource::Buffer(raw);

        if device.pending_writes.dst_buffers.contains(&buffer_id) {
            // A queued write still targets this buffer: let the pending
            // writes own the resources until they are flushed.
            device.pending_writes.temp_resources.push((temp, memory));
        } else {
            let last_submit_index = buffer.life_guard.submission_index.load(Ordering::Acquire);
            drop(buffer_guard);
            device.lock_life(&mut token).schedule_resource_destruction(
                temp,
                memory,
                last_submit_index,
            );
        }

        Ok(())
    }
3112 
    /// Drop the user's reference to `buffer_id`, queueing the buffer for
    /// cleanup by the device's life tracker.
    ///
    /// When `wait` is set, blocks until the buffer's last submission has
    /// completed (failures are logged, not returned).
    pub fn buffer_drop<B: GfxBackend>(&self, buffer_id: id::BufferId, wait: bool) {
        profiling::scope!("drop", "Buffer");

        let hub = B::hub(self);
        let mut token = Token::root();

        log::info!("Buffer {:?} is dropped", buffer_id);
        let (ref_count, last_submit_index, device_id) = {
            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
            match buffer_guard.get_mut(buffer_id) {
                Ok(buffer) => {
                    // Detach the user ref-count; dropping it later is what
                    // lets the life tracker reclaim the buffer.
                    let ref_count = buffer.life_guard.ref_count.take().unwrap();
                    let last_submit_index =
                        buffer.life_guard.submission_index.load(Ordering::Acquire);
                    (ref_count, last_submit_index, buffer.device_id.value)
                }
                Err(InvalidId) => {
                    // Error-slot id: just remove the registry entry.
                    hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard);
                    return;
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let device = &device_guard[device_id];
        {
            let mut life_lock = device.lock_life(&mut token);
            if device.pending_writes.dst_buffers.contains(&buffer_id) {
                // A queued write still targets the buffer: keep the ref
                // alive and suspect it only after the writes flush.
                life_lock.future_suspected_buffers.push(Stored {
                    value: id::Valid(buffer_id),
                    ref_count,
                });
            } else {
                drop(ref_count);
                life_lock
                    .suspected_resources
                    .buffers
                    .push(id::Valid(buffer_id));
            }
        }

        if wait {
            match device.wait_for_submit(last_submit_index, &mut token) {
                Ok(()) => (),
                Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
            }
        }
    }
3161 
    /// Create a texture on `device_id` described by `desc`.
    ///
    /// Returns the new texture id plus an optional error; on failure the id
    /// still refers to an "error" slot, so callers always get a usable id.
    pub fn device_create_texture<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &resource::TextureDescriptor,
        id_in: Input<G, id::TextureId>,
    ) -> (id::TextureId, Option<resource::CreateTextureError>) {
        profiling::scope!("create_texture", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.textures.prepare(id_in);

        let (adapter_guard, mut token) = hub.adapters.read(&mut token);
        let (device_guard, mut token) = hub.devices.read(&mut token);
        // Single-pass `loop` used as an error scope: failures `break` with
        // the error, the success path `return`s early.
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::CreateTexture(fid.id(), desc.clone()));
            }

            // NOTE(review): the adapter is passed through to
            // `Device::create_texture` — presumably for format/usage
            // capability checks; confirm.
            let adapter = &adapter_guard[device.adapter_id.value];
            let texture = match device.create_texture(device_id, adapter, desc) {
                Ok(texture) => texture,
                Err(error) => break error,
            };
            // Mip-level and array-layer counts seed the usage tracker state.
            let num_levels = texture.full_range.levels.end;
            let num_layers = texture.full_range.layers.end;
            let ref_count = texture.life_guard.add_ref();

            let id = fid.assign(texture, &mut token);
            log::info!("Created texture {:?} with {:?}", id, desc);

            device
                .trackers
                .lock()
                .textures
                .init(id, ref_count, TextureState::new(num_levels, num_layers))
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3212 
texture_label<B: GfxBackend>(&self, id: id::TextureId) -> String3213     pub fn texture_label<B: GfxBackend>(&self, id: id::TextureId) -> String {
3214         B::hub(self).textures.label_for_resource(id)
3215     }
3216 
texture_destroy<B: GfxBackend>( &self, texture_id: id::TextureId, ) -> Result<(), resource::DestroyError>3217     pub fn texture_destroy<B: GfxBackend>(
3218         &self,
3219         texture_id: id::TextureId,
3220     ) -> Result<(), resource::DestroyError> {
3221         profiling::scope!("destroy", "Texture");
3222 
3223         let hub = B::hub(self);
3224         let mut token = Token::root();
3225 
3226         //TODO: lock pending writes separately, keep the device read-only
3227         let (mut device_guard, mut token) = hub.devices.write(&mut token);
3228 
3229         log::info!("Buffer {:?} is destroyed", texture_id);
3230         let (mut texture_guard, _) = hub.textures.write(&mut token);
3231         let texture = texture_guard
3232             .get_mut(texture_id)
3233             .map_err(|_| resource::DestroyError::Invalid)?;
3234 
3235         let device = &mut device_guard[texture.device_id.value];
3236 
3237         #[cfg(feature = "trace")]
3238         if let Some(ref trace) = device.trace {
3239             trace.lock().add(trace::Action::FreeTexture(texture_id));
3240         }
3241 
3242         let (raw, memory) = texture
3243             .raw
3244             .take()
3245             .ok_or(resource::DestroyError::AlreadyDestroyed)?;
3246         let temp = queue::TempResource::Image(raw);
3247 
3248         if device.pending_writes.dst_textures.contains(&texture_id) {
3249             device.pending_writes.temp_resources.push((temp, memory));
3250         } else {
3251             let last_submit_index = texture.life_guard.submission_index.load(Ordering::Acquire);
3252             drop(texture_guard);
3253             device.lock_life(&mut token).schedule_resource_destruction(
3254                 temp,
3255                 memory,
3256                 last_submit_index,
3257             );
3258         }
3259 
3260         Ok(())
3261     }
3262 
    /// Drop the user's reference to `texture_id`, queueing the texture for
    /// cleanup by the device's life tracker.
    ///
    /// When `wait` is set, blocks until the texture's last submission has
    /// completed (failures are logged, not returned).
    pub fn texture_drop<B: GfxBackend>(&self, texture_id: id::TextureId, wait: bool) {
        profiling::scope!("drop", "Texture");

        let hub = B::hub(self);
        let mut token = Token::root();

        let (ref_count, last_submit_index, device_id) = {
            let (mut texture_guard, _) = hub.textures.write(&mut token);
            match texture_guard.get_mut(texture_id) {
                Ok(texture) => {
                    // Detach the user ref-count; dropping it later is what
                    // lets the life tracker reclaim the texture.
                    let ref_count = texture.life_guard.ref_count.take().unwrap();
                    let last_submit_index =
                        texture.life_guard.submission_index.load(Ordering::Acquire);
                    (ref_count, last_submit_index, texture.device_id.value)
                }
                Err(InvalidId) => {
                    // Error-slot id: just remove the registry entry.
                    hub.textures
                        .unregister_locked(texture_id, &mut *texture_guard);
                    return;
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let device = &device_guard[device_id];
        {
            let mut life_lock = device.lock_life(&mut token);
            if device.pending_writes.dst_textures.contains(&texture_id) {
                // A queued write still targets the texture: keep the ref
                // alive and suspect it only after the writes flush.
                life_lock.future_suspected_textures.push(Stored {
                    value: id::Valid(texture_id),
                    ref_count,
                });
            } else {
                drop(ref_count);
                life_lock
                    .suspected_resources
                    .textures
                    .push(id::Valid(texture_id));
            }
        }

        if wait {
            match device.wait_for_submit(last_submit_index, &mut token) {
                Ok(()) => (),
                Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
            }
        }
    }
3311 
    /// Create a view of `texture_id` described by `desc`.
    ///
    /// Returns the new view id plus an optional error; on failure the id
    /// still refers to an "error" slot, so callers always get a usable id.
    pub fn texture_create_view<B: GfxBackend>(
        &self,
        texture_id: id::TextureId,
        desc: &resource::TextureViewDescriptor,
        id_in: Input<G, id::TextureViewId>,
    ) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
        profiling::scope!("create_view", "Texture");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.texture_views.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let (texture_guard, mut token) = hub.textures.read(&mut token);
        // Single-pass `loop` used as an error scope: failures `break` with
        // the error, the success path `return`s early.
        let error = loop {
            let texture = match texture_guard.get(texture_id) {
                Ok(texture) => texture,
                Err(_) => break resource::CreateTextureViewError::InvalidTexture,
            };
            let device = &device_guard[texture.device_id.value];
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace.lock().add(trace::Action::CreateTextureView {
                    id: fid.id(),
                    parent_id: texture_id,
                    desc: desc.clone(),
                });
            }

            let view = match device.create_texture_view(texture, texture_id, desc) {
                Ok(view) => view,
                Err(e) => break e,
            };
            let ref_count = view.life_guard.add_ref();
            let id = fid.assign(view, &mut token);

            // Views carry no usage state of their own, hence `PhantomData`.
            device
                .trackers
                .lock()
                .views
                .init(id, ref_count, PhantomData)
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3360 
texture_view_label<B: GfxBackend>(&self, id: id::TextureViewId) -> String3361     pub fn texture_view_label<B: GfxBackend>(&self, id: id::TextureViewId) -> String {
3362         B::hub(self).texture_views.label_for_resource(id)
3363     }
3364 
    /// Drop the user's reference to `texture_view_id`, queueing the view
    /// for cleanup by the owning device's life tracker.
    ///
    /// Swap-chain views cannot be dropped this way and return
    /// `SwapChainImage`. When `wait` is set, blocks until the view's last
    /// submission has completed (failures are logged, not returned).
    pub fn texture_view_drop<B: GfxBackend>(
        &self,
        texture_view_id: id::TextureViewId,
        wait: bool,
    ) -> Result<(), resource::TextureViewDestroyError> {
        profiling::scope!("drop", "TextureView");

        let hub = B::hub(self);
        let mut token = Token::root();

        let (last_submit_index, device_id) = {
            let (texture_guard, mut token) = hub.textures.read(&mut token);
            let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);

            match texture_view_guard.get_mut(texture_view_id) {
                Ok(view) => {
                    let _ref_count = view.life_guard.ref_count.take();
                    let last_submit_index =
                        view.life_guard.submission_index.load(Ordering::Acquire);
                    // The owning device is found through the parent texture;
                    // swap-chain views have no parent texture to consult.
                    let device_id = match view.inner {
                        resource::TextureViewInner::Native { ref source_id, .. } => {
                            texture_guard[source_id.value].device_id.value
                        }
                        resource::TextureViewInner::SwapChain { .. } => {
                            return Err(resource::TextureViewDestroyError::SwapChainImage)
                        }
                    };
                    (last_submit_index, device_id)
                }
                Err(InvalidId) => {
                    // Error-slot id: just remove the registry entry.
                    hub.texture_views
                        .unregister_locked(texture_view_id, &mut *texture_view_guard);
                    return Ok(());
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let device = &device_guard[device_id];
        device
            .lock_life(&mut token)
            .suspected_resources
            .texture_views
            .push(id::Valid(texture_view_id));

        if wait {
            match device.wait_for_submit(last_submit_index, &mut token) {
                Ok(()) => (),
                Err(e) => log::error!(
                    "Failed to wait for texture view {:?}: {:?}",
                    texture_view_id,
                    e
                ),
            }
        }
        Ok(())
    }
3422 
    /// Creates a new sampler on `device_id` from `desc`.
    ///
    /// On success, returns the new sampler id and `None`. On failure, an
    /// error placeholder is registered under the reserved id (labelled with
    /// the descriptor's label) and the error is returned alongside it, so the
    /// client-visible id is always valid.
    pub fn device_create_sampler<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &resource::SamplerDescriptor,
        id_in: Input<G, id::SamplerId>,
    ) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
        profiling::scope!("create_sampler", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        // Reserve the id up front so both success and error paths can
        // register something under it.
        let fid = hub.samplers.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        // `loop` is a labelled early-exit: `break <err>` falls through to the
        // error registration below; success `return`s directly.
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            // Record the action before attempting it, so a replayed trace
            // reproduces failures too.
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::CreateSampler(fid.id(), desc.clone()));
            }

            let sampler = match device.create_sampler(device_id, desc) {
                Ok(sampler) => sampler,
                Err(e) => break e,
            };
            // Extra ref-count is owned by the device tracker entry below.
            let ref_count = sampler.life_guard.add_ref();
            let id = fid.assign(sampler, &mut token);

            device
                .trackers
                .lock()
                .samplers
                .init(id, ref_count, PhantomData)
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3467 
sampler_label<B: GfxBackend>(&self, id: id::SamplerId) -> String3468     pub fn sampler_label<B: GfxBackend>(&self, id: id::SamplerId) -> String {
3469         B::hub(self).samplers.label_for_resource(id)
3470     }
3471 
    /// Drops the user reference to a sampler.
    ///
    /// The sampler is not destroyed immediately: its ref-count is released
    /// and the id is pushed onto the owning device's suspected-resources
    /// list, to be reclaimed later by the device's lifetime tracking.
    pub fn sampler_drop<B: GfxBackend>(&self, sampler_id: id::SamplerId) {
        profiling::scope!("drop", "Sampler");

        let hub = B::hub(self);
        let mut token = Token::root();

        let device_id = {
            let (mut sampler_guard, _) = hub.samplers.write(&mut token);
            match sampler_guard.get_mut(sampler_id) {
                Ok(sampler) => {
                    // Release the external ref-count; destruction is deferred.
                    sampler.life_guard.ref_count.take();
                    sampler.device_id.value
                }
                Err(InvalidId) => {
                    // Id refers to an error placeholder: free it right away,
                    // there is no device-side resource behind it.
                    hub.samplers
                        .unregister_locked(sampler_id, &mut *sampler_guard);
                    return;
                }
            }
        };

        // The sampler write-guard is dropped at the end of the block above,
        // before the device registry is locked.
        let (device_guard, mut token) = hub.devices.read(&mut token);
        device_guard[device_id]
            .lock_life(&mut token)
            .suspected_resources
            .samplers
            .push(id::Valid(sampler_id));
    }
3500 
    /// Creates a bind group layout on `device_id` from `desc`.
    ///
    /// Rejects descriptors with duplicate binding numbers. When the identity
    /// handler generates ids (zero-sized `Input`), an equivalent existing
    /// layout may be returned instead of creating a duplicate. On failure an
    /// error placeholder is registered under the reserved id.
    pub fn device_create_bind_group_layout<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &binding_model::BindGroupLayoutDescriptor,
        id_in: Input<G, id::BindGroupLayoutId>,
    ) -> (
        id::BindGroupLayoutId,
        Option<binding_model::CreateBindGroupLayoutError>,
    ) {
        profiling::scope!("create_bind_group_layout", "Device");

        let mut token = Token::root();
        let hub = B::hub(self);
        let fid = hub.bind_group_layouts.prepare(id_in);

        // Labelled loop as an early-exit mechanism: `break 'outer <err>`
        // (also from inside the inner `for`) jumps to the error registration.
        let error = 'outer: loop {
            let (device_guard, mut token) = hub.devices.read(&mut token);
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone()));
            }

            // Index entries by binding number, rejecting duplicates.
            let mut entry_map = FastHashMap::default();
            for entry in desc.entries.iter() {
                if entry_map.insert(entry.binding, *entry).is_some() {
                    break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
                        entry.binding,
                    );
                }
            }

            // If there is an equivalent BGL, just bump the refcount and return it.
            // This is only applicable for identity filters that are generating new IDs,
            // so their inputs are `PhantomData` of size 0.
            if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
                let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
                if let Some(id) =
                    Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
                {
                    return (id, None);
                }
            }

            let layout = match device.create_bind_group_layout(
                device_id,
                desc.label.as_ref().map(|cow| cow.as_ref()),
                entry_map,
            ) {
                Ok(layout) => layout,
                Err(e) => break e,
            };

            let id = fid.assign(layout, &mut token);
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3566 
bind_group_layout_label<B: GfxBackend>(&self, id: id::BindGroupLayoutId) -> String3567     pub fn bind_group_layout_label<B: GfxBackend>(&self, id: id::BindGroupLayoutId) -> String {
3568         B::hub(self).bind_group_layouts.label_for_resource(id)
3569     }
3570 
    /// Drops the user reference to a bind group layout.
    ///
    /// The layout id is queued on the owning device's suspected-resources
    /// list for deferred destruction. Invalid (error placeholder) ids are
    /// unregistered immediately.
    pub fn bind_group_layout_drop<B: GfxBackend>(
        &self,
        bind_group_layout_id: id::BindGroupLayoutId,
    ) {
        profiling::scope!("drop", "BindGroupLayout");

        let hub = B::hub(self);
        let mut token = Token::root();
        let device_id = {
            let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
            match bind_group_layout_guard.get_mut(bind_group_layout_id) {
                Ok(layout) => layout.device_id.value,
                Err(InvalidId) => {
                    // Error placeholder: nothing device-side to clean up.
                    hub.bind_group_layouts
                        .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
                    return;
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        device_guard[device_id]
            .lock_life(&mut token)
            .suspected_resources
            .bind_group_layouts
            .push(id::Valid(bind_group_layout_id));
    }
3598 
    /// Creates a pipeline layout on `device_id` from `desc`.
    ///
    /// Resolves the descriptor's bind group layout ids against the registry
    /// while creating. On failure an error placeholder is registered under
    /// the reserved id and the error is returned alongside it.
    pub fn device_create_pipeline_layout<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &binding_model::PipelineLayoutDescriptor,
        id_in: Input<G, id::PipelineLayoutId>,
    ) -> (
        id::PipelineLayoutId,
        Option<binding_model::CreatePipelineLayoutError>,
    ) {
        profiling::scope!("create_pipeline_layout", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.pipeline_layouts.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        // `loop` as labelled early-exit: `break <err>` reaches the error
        // registration below, success `return`s directly.
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone()));
            }

            // Inner scope bounds the bind-group-layout read lock to the
            // creation call only.
            let layout = {
                let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
                match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
                    Ok(layout) => layout,
                    Err(e) => break e,
                }
            };

            let id = fid.assign(layout, &mut token);
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3642 
pipeline_layout_label<B: GfxBackend>(&self, id: id::PipelineLayoutId) -> String3643     pub fn pipeline_layout_label<B: GfxBackend>(&self, id: id::PipelineLayoutId) -> String {
3644         B::hub(self).pipeline_layouts.label_for_resource(id)
3645     }
3646 
    /// Drops the user reference to a pipeline layout.
    ///
    /// Unlike the other `*_drop` paths, the taken ref-count is moved into the
    /// `Stored` entry pushed to the device's suspected-resources list, so the
    /// lifetime tracker keeps it alive until it decides to destroy it.
    pub fn pipeline_layout_drop<B: GfxBackend>(&self, pipeline_layout_id: id::PipelineLayoutId) {
        profiling::scope!("drop", "PipelineLayout");

        let hub = B::hub(self);
        let mut token = Token::root();
        let (device_id, ref_count) = {
            let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
            match pipeline_layout_guard.get_mut(pipeline_layout_id) {
                Ok(layout) => (
                    layout.device_id.value,
                    // The ref-count must still be present here; `.unwrap()`
                    // guards against a double-drop.
                    layout.life_guard.ref_count.take().unwrap(),
                ),
                Err(InvalidId) => {
                    // Error placeholder: unregister directly, nothing to track.
                    hub.pipeline_layouts
                        .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
                    return;
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        device_guard[device_id]
            .lock_life(&mut token)
            .suspected_resources
            .pipeline_layouts
            .push(Stored {
                value: id::Valid(pipeline_layout_id),
                ref_count,
            });
    }
3677 
    /// Creates a bind group on `device_id` from `desc`.
    ///
    /// Looks up the referenced bind group layout, delegates resource binding
    /// validation to `Device::create_bind_group`, and registers the result in
    /// the device trackers. On failure an error placeholder is registered
    /// under the reserved id.
    pub fn device_create_bind_group<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &binding_model::BindGroupDescriptor,
        id_in: Input<G, id::BindGroupId>,
    ) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
        profiling::scope!("create_bind_group", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.bind_groups.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);

        // `loop` as labelled early-exit: `break <err>` reaches the error
        // registration below.
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::CreateBindGroup(fid.id(), desc.clone()));
            }

            let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
                Ok(layout) => layout,
                Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
            };
            // The device method takes the hub and token so it can lock the
            // buffer/texture/sampler registries itself.
            let bind_group = match device.create_bind_group(
                device_id,
                bind_group_layout,
                desc,
                &hub,
                &mut token,
            ) {
                Ok(bind_group) => bind_group,
                Err(e) => break e,
            };
            // Extra ref-count is owned by the device tracker entry below.
            let ref_count = bind_group.life_guard.add_ref();

            let id = fid.assign(bind_group, &mut token);
            log::debug!(
                "Bind group {:?} {:#?}",
                id,
                hub.bind_groups.read(&mut token).0[id].used
            );

            device
                .trackers
                .lock()
                .bind_groups
                .init(id, ref_count, PhantomData)
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3740 
bind_group_label<B: GfxBackend>(&self, id: id::BindGroupId) -> String3741     pub fn bind_group_label<B: GfxBackend>(&self, id: id::BindGroupId) -> String {
3742         B::hub(self).bind_groups.label_for_resource(id)
3743     }
3744 
    /// Drops the user reference to a bind group.
    ///
    /// The id is queued on the owning device's suspected-resources list for
    /// deferred destruction; error placeholder ids are freed immediately.
    pub fn bind_group_drop<B: GfxBackend>(&self, bind_group_id: id::BindGroupId) {
        profiling::scope!("drop", "BindGroup");

        let hub = B::hub(self);
        let mut token = Token::root();

        let device_id = {
            let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token);
            match bind_group_guard.get_mut(bind_group_id) {
                Ok(bind_group) => {
                    // Release the external ref-count; destruction is deferred.
                    bind_group.life_guard.ref_count.take();
                    bind_group.device_id.value
                }
                Err(InvalidId) => {
                    hub.bind_groups
                        .unregister_locked(bind_group_id, &mut *bind_group_guard);
                    return;
                }
            }
        };

        let (device_guard, mut token) = hub.devices.read(&mut token);
        device_guard[device_id]
            .lock_life(&mut token)
            .suspected_resources
            .bind_groups
            .push(id::Valid(bind_group_id));
    }
3773 
    /// Creates a shader module on `device_id` from `source` (SPIR-V, WGSL,
    /// or a pre-built Naga module), described by `desc`.
    ///
    /// When tracing is enabled the shader source is written out as a binary
    /// sidecar file before creation. On failure an error placeholder is
    /// registered under the reserved id.
    pub fn device_create_shader_module<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &pipeline::ShaderModuleDescriptor,
        source: pipeline::ShaderModuleSource,
        id_in: Input<G, id::ShaderModuleId>,
    ) -> (
        id::ShaderModuleId,
        Option<pipeline::CreateShaderModuleError>,
    ) {
        profiling::scope!("create_shader_module", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.shader_modules.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                let mut trace = trace.lock();
                let data = match source {
                    pipeline::ShaderModuleSource::SpirV(ref spv) => {
                        // SAFETY: reinterprets the `u32` SPIR-V words as raw
                        // bytes; the byte length is `len * 4` to match, and
                        // the slice is only read for the duration of the call.
                        trace.make_binary("spv", unsafe {
                            std::slice::from_raw_parts(spv.as_ptr() as *const u8, spv.len() * 4)
                        })
                    }
                    pipeline::ShaderModuleSource::Wgsl(ref code) => {
                        trace.make_binary("wgsl", code.as_bytes())
                    }
                    pipeline::ShaderModuleSource::Naga(_) => {
                        // we don't want to enable Naga serialization just for this alone
                        trace.make_binary("ron", &[])
                    }
                };
                trace.add(trace::Action::CreateShaderModule {
                    id: fid.id(),
                    desc: desc.clone(),
                    data,
                });
            };

            let shader = match device.create_shader_module(device_id, desc, source) {
                Ok(shader) => shader,
                Err(e) => break e,
            };
            let id = fid.assign(shader, &mut token);
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3831 
shader_module_label<B: GfxBackend>(&self, id: id::ShaderModuleId) -> String3832     pub fn shader_module_label<B: GfxBackend>(&self, id: id::ShaderModuleId) -> String {
3833         B::hub(self).shader_modules.label_for_resource(id)
3834     }
3835 
    /// Drops a shader module.
    ///
    /// Unlike most `*_drop` paths, the module is unregistered and its raw
    /// backend object destroyed immediately rather than queued on the
    /// suspected-resources list. A missing module (already dropped or an
    /// error placeholder) is a no-op.
    pub fn shader_module_drop<B: GfxBackend>(&self, shader_module_id: id::ShaderModuleId) {
        profiling::scope!("drop", "ShaderModule");

        let hub = B::hub(self);
        let mut token = Token::root();
        let (device_guard, mut token) = hub.devices.read(&mut token);
        let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
        if let Some(module) = module {
            let device = &device_guard[module.device_id.value];
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace
                    .lock()
                    .add(trace::Action::DestroyShaderModule(shader_module_id));
            }
            unsafe {
                // SAFETY: the module was just unregistered, so no other
                // handle to `module.raw` remains.
                device.raw.destroy_shader_module(module.raw);
            }
        }
    }
3856 
    /// Creates a command encoder on `device_id`.
    ///
    /// Allocates a command buffer from the device's command allocator, names
    /// it after the descriptor label, and begins recording as a one-time
    /// submit primary buffer. On failure an error placeholder is registered
    /// under the reserved id.
    pub fn device_create_command_encoder<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &wgt::CommandEncoderDescriptor<Label>,
        id_in: Input<G, id::CommandEncoderId>,
    ) -> (id::CommandEncoderId, Option<command::CommandAllocatorError>) {
        profiling::scope!("create_command_encoder", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.command_buffers.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };

            // The command buffer keeps the device alive via this ref-count.
            let dev_stored = Stored {
                value: id::Valid(device_id),
                ref_count: device.life_guard.add_ref(),
            };

            let mut command_buffer = match device.cmd_allocator.allocate(
                dev_stored,
                &device.raw,
                device.limits.clone(),
                device.downlevel,
                device.features,
                device.private_features,
                &desc.label,
                #[cfg(feature = "trace")]
                device.trace.is_some(),
            ) {
                Ok(cmd_buf) => cmd_buf,
                Err(e) => break e,
            };

            let mut raw = command_buffer.raw.first_mut().unwrap();
            unsafe {
                // SAFETY: `raw` was just allocated above and is recorded by
                // this thread only; naming and beginning it here is sound.
                if let Some(ref label) = desc.label {
                    device.raw.set_command_buffer_name(&mut raw, label);
                }
                raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
            }

            let id = fid.assign(command_buffer, &mut token);
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
3911 
command_buffer_label<B: GfxBackend>(&self, id: id::CommandBufferId) -> String3912     pub fn command_buffer_label<B: GfxBackend>(&self, id: id::CommandBufferId) -> String {
3913         B::hub(self).command_buffers.label_for_resource(id)
3914     }
3915 
    /// Drops a command encoder that was never finished/submitted.
    ///
    /// Unregisters the command buffer, removes its resource uses from the
    /// device trackers, and returns it to the command allocator. A missing
    /// id (already consumed or an error placeholder) is a no-op.
    pub fn command_encoder_drop<B: GfxBackend>(&self, command_encoder_id: id::CommandEncoderId) {
        profiling::scope!("drop", "CommandEncoder");

        let hub = B::hub(self);
        let mut token = Token::root();

        // A write lock on devices is needed because `untrack` mutates the
        // device below.
        let (mut device_guard, mut token) = hub.devices.write(&mut token);
        let (cmdbuf, _) = hub
            .command_buffers
            .unregister(command_encoder_id, &mut token);
        if let Some(cmdbuf) = cmdbuf {
            let device = &mut device_guard[cmdbuf.device_id.value];
            device.untrack::<G>(&hub, &cmdbuf.trackers, &mut token);
            device.cmd_allocator.discard(cmdbuf);
        }
    }
3932 
    /// Drops a finished command buffer.
    ///
    /// Command buffers and command encoders share the same registry, so this
    /// simply delegates to [`Self::command_encoder_drop`].
    pub fn command_buffer_drop<B: GfxBackend>(&self, command_buffer_id: id::CommandBufferId) {
        profiling::scope!("drop", "CommandBuffer");
        self.command_encoder_drop::<B>(command_buffer_id)
    }
3937 
    /// Creates a render bundle encoder for `device_id`.
    ///
    /// Backend-agnostic (no `B` parameter): the encoder only records
    /// commands. On a descriptor error, a dummy encoder is returned together
    /// with the error so the caller still receives a usable id.
    ///
    /// The returned id is a raw pointer produced by `Box::into_raw`; the
    /// caller takes ownership and is responsible for eventually freeing it
    /// (e.g. by finishing the encoder).
    pub fn device_create_render_bundle_encoder(
        &self,
        device_id: id::DeviceId,
        desc: &command::RenderBundleEncoderDescriptor,
    ) -> (
        id::RenderBundleEncoderId,
        Option<command::CreateRenderBundleError>,
    ) {
        profiling::scope!("create_render_bundle_encoder", "Device");
        let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
            Ok(encoder) => (encoder, None),
            Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
        };
        (Box::into_raw(Box::new(encoder)), error)
    }
3953 
    /// Finishes a render bundle encoder, producing a render bundle.
    ///
    /// Validates and bakes the recorded commands against the parent device,
    /// registers the bundle in the device trackers, and returns its id. On
    /// failure an error placeholder is registered under the reserved id.
    pub fn render_bundle_encoder_finish<B: GfxBackend>(
        &self,
        bundle_encoder: command::RenderBundleEncoder,
        desc: &command::RenderBundleDescriptor,
        id_in: Input<G, id::RenderBundleId>,
    ) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
        profiling::scope!("finish", "RenderBundleEncoder");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.render_bundles.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let error = loop {
            // The device is looked up from the encoder itself, not a caller
            // argument.
            let device = match device_guard.get(bundle_encoder.parent()) {
                Ok(device) => device,
                Err(_) => break command::RenderBundleError::INVALID_DEVICE,
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace.lock().add(trace::Action::CreateRenderBundle {
                    id: fid.id(),
                    desc: trace::new_render_bundle_encoder_descriptor(
                        desc.label.clone(),
                        &bundle_encoder.context,
                    ),
                    base: bundle_encoder.to_base_pass(),
                });
            }

            let render_bundle = match bundle_encoder.finish(desc, device, &hub, &mut token) {
                Ok(bundle) => bundle,
                Err(e) => break e,
            };

            log::debug!("Render bundle {:#?}", render_bundle.used);
            // Extra ref-count is owned by the device tracker entry below.
            let ref_count = render_bundle.life_guard.add_ref();
            let id = fid.assign(render_bundle, &mut token);

            device
                .trackers
                .lock()
                .bundles
                .init(id, ref_count, PhantomData)
                .unwrap();
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
4005 
render_bundle_label<B: GfxBackend>(&self, id: id::RenderBundleId) -> String4006     pub fn render_bundle_label<B: GfxBackend>(&self, id: id::RenderBundleId) -> String {
4007         B::hub(self).render_bundles.label_for_resource(id)
4008     }
4009 
    /// Drops the user reference to a render bundle.
    ///
    /// The id is queued on the owning device's suspected-resources list for
    /// deferred destruction; error placeholder ids are freed immediately.
    pub fn render_bundle_drop<B: GfxBackend>(&self, render_bundle_id: id::RenderBundleId) {
        profiling::scope!("drop", "RenderBundle");
        let hub = B::hub(self);
        let mut token = Token::root();

        // Devices are locked before bundles, matching the registry order.
        let (device_guard, mut token) = hub.devices.read(&mut token);
        let device_id = {
            let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
            match bundle_guard.get_mut(render_bundle_id) {
                Ok(bundle) => {
                    // Release the external ref-count; destruction is deferred.
                    bundle.life_guard.ref_count.take();
                    bundle.device_id.value
                }
                Err(InvalidId) => {
                    hub.render_bundles
                        .unregister_locked(render_bundle_id, &mut *bundle_guard);
                    return;
                }
            }
        };

        device_guard[device_id]
            .lock_life(&mut token)
            .suspected_resources
            .render_bundles
            .push(id::Valid(render_bundle_id));
    }
4037 
    /// Creates a query set on `device_id` from `desc`.
    ///
    /// Registers the new query set in the device trackers. On failure an
    /// error placeholder is registered under the reserved id; note the
    /// placeholder label is empty — this descriptor type carries no label.
    pub fn device_create_query_set<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &wgt::QuerySetDescriptor,
        id_in: Input<G, id::QuerySetId>,
    ) -> (id::QuerySetId, Option<resource::CreateQuerySetError>) {
        profiling::scope!("create_query_set", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();
        let fid = hub.query_sets.prepare(id_in);

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace.lock().add(trace::Action::CreateQuerySet {
                    id: fid.id(),
                    desc: desc.clone(),
                });
            }

            let query_set = match device.create_query_set(device_id, desc) {
                Ok(query_set) => query_set,
                Err(err) => break err,
            };

            // Extra ref-count is owned by the device tracker entry below.
            let ref_count = query_set.life_guard.add_ref();
            let id = fid.assign(query_set, &mut token);

            device
                .trackers
                .lock()
                .query_sets
                .init(id, ref_count, PhantomData)
                .unwrap();

            return (id.0, None);
        };

        let id = fid.assign_error("", &mut token);
        (id, Some(error))
    }
4085 
query_set_drop<B: GfxBackend>(&self, query_set_id: id::QuerySetId)4086     pub fn query_set_drop<B: GfxBackend>(&self, query_set_id: id::QuerySetId) {
4087         profiling::scope!("drop", "QuerySet");
4088 
4089         let hub = B::hub(self);
4090         let mut token = Token::root();
4091 
4092         let device_id = {
4093             let (mut query_set_guard, _) = hub.query_sets.write(&mut token);
4094             let query_set = query_set_guard.get_mut(query_set_id).unwrap();
4095             query_set.life_guard.ref_count.take();
4096             query_set.device_id.value
4097         };
4098 
4099         let (device_guard, mut token) = hub.devices.read(&mut token);
4100         let device = &device_guard[device_id];
4101 
4102         #[cfg(feature = "trace")]
4103         if let Some(ref trace) = device.trace {
4104             trace
4105                 .lock()
4106                 .add(trace::Action::DestroyQuerySet(query_set_id));
4107         }
4108 
4109         device
4110             .lock_life(&mut token)
4111             .suspected_resources
4112             .query_sets
4113             .push(id::Valid(query_set_id));
4114     }
4115 
    /// Creates a render pipeline on `device_id` from `desc`.
    ///
    /// `implicit_pipeline_ids`, when provided, pre-reserves ids for the
    /// pipeline layout and bind group layouts that the device derives
    /// implicitly from the shader interfaces. On failure an error
    /// placeholder is registered under the reserved pipeline id.
    pub fn device_create_render_pipeline<B: GfxBackend>(
        &self,
        device_id: id::DeviceId,
        desc: &pipeline::RenderPipelineDescriptor,
        id_in: Input<G, id::RenderPipelineId>,
        implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
    ) -> (
        id::RenderPipelineId,
        Option<pipeline::CreateRenderPipelineError>,
    ) {
        profiling::scope!("create_render_pipeline", "Device");

        let hub = B::hub(self);
        let mut token = Token::root();

        let fid = hub.render_pipelines.prepare(id_in);
        let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(&hub));

        let (device_guard, mut token) = hub.devices.read(&mut token);
        let error = loop {
            let device = match device_guard.get(device_id) {
                Ok(device) => device,
                Err(_) => break DeviceError::Invalid.into(),
            };
            #[cfg(feature = "trace")]
            if let Some(ref trace) = device.trace {
                trace.lock().add(trace::Action::CreateRenderPipeline {
                    id: fid.id(),
                    desc: desc.clone(),
                    implicit_context: implicit_context.clone(),
                });
            }

            // The device method takes the hub and token so it can resolve
            // shader modules and layouts from their registries itself.
            let pipeline = match device.create_render_pipeline(
                device_id,
                desc,
                implicit_context,
                &hub,
                &mut token,
            ) {
                Ok(pair) => pair,
                Err(e) => break e,
            };

            let id = fid.assign(pipeline, &mut token);
            return (id.0, None);
        };

        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
        (id, Some(error))
    }
4167 
4168     /// Get an ID of one of the bind group layouts. The ID adds a refcount,
4169     /// which needs to be released by calling `bind_group_layout_drop`.
render_pipeline_get_bind_group_layout<B: GfxBackend>( &self, pipeline_id: id::RenderPipelineId, index: u32, id_in: Input<G, id::BindGroupLayoutId>, ) -> ( id::BindGroupLayoutId, Option<binding_model::GetBindGroupLayoutError>, )4170     pub fn render_pipeline_get_bind_group_layout<B: GfxBackend>(
4171         &self,
4172         pipeline_id: id::RenderPipelineId,
4173         index: u32,
4174         id_in: Input<G, id::BindGroupLayoutId>,
4175     ) -> (
4176         id::BindGroupLayoutId,
4177         Option<binding_model::GetBindGroupLayoutError>,
4178     ) {
4179         let hub = B::hub(self);
4180         let mut token = Token::root();
4181         let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
4182 
4183         let error = loop {
4184             let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
4185             let (_, mut token) = hub.bind_groups.read(&mut token);
4186             let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
4187 
4188             let pipeline = match pipeline_guard.get(pipeline_id) {
4189                 Ok(pipeline) => pipeline,
4190                 Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
4191             };
4192             let id = match pipeline_layout_guard[pipeline.layout_id.value]
4193                 .bind_group_layout_ids
4194                 .get(index as usize)
4195             {
4196                 Some(id) => id,
4197                 None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
4198             };
4199 
4200             bgl_guard[*id].multi_ref_count.inc();
4201             return (id.0, None);
4202         };
4203 
4204         let id = hub
4205             .bind_group_layouts
4206             .prepare(id_in)
4207             .assign_error("<derived>", &mut token);
4208         (id, Some(error))
4209     }
4210 
render_pipeline_label<B: GfxBackend>(&self, id: id::RenderPipelineId) -> String4211     pub fn render_pipeline_label<B: GfxBackend>(&self, id: id::RenderPipelineId) -> String {
4212         B::hub(self).render_pipelines.label_for_resource(id)
4213     }
4214 
render_pipeline_drop<B: GfxBackend>(&self, render_pipeline_id: id::RenderPipelineId)4215     pub fn render_pipeline_drop<B: GfxBackend>(&self, render_pipeline_id: id::RenderPipelineId) {
4216         profiling::scope!("drop", "RenderPipeline");
4217         let hub = B::hub(self);
4218         let mut token = Token::root();
4219         let (device_guard, mut token) = hub.devices.read(&mut token);
4220 
4221         let (device_id, layout_id) = {
4222             let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
4223             match pipeline_guard.get_mut(render_pipeline_id) {
4224                 Ok(pipeline) => {
4225                     pipeline.life_guard.ref_count.take();
4226                     (pipeline.device_id.value, pipeline.layout_id.clone())
4227                 }
4228                 Err(InvalidId) => {
4229                     hub.render_pipelines
4230                         .unregister_locked(render_pipeline_id, &mut *pipeline_guard);
4231                     return;
4232                 }
4233             }
4234         };
4235 
4236         let mut life_lock = device_guard[device_id].lock_life(&mut token);
4237         life_lock
4238             .suspected_resources
4239             .render_pipelines
4240             .push(id::Valid(render_pipeline_id));
4241         life_lock
4242             .suspected_resources
4243             .pipeline_layouts
4244             .push(layout_id);
4245     }
4246 
device_create_compute_pipeline<B: GfxBackend>( &self, device_id: id::DeviceId, desc: &pipeline::ComputePipelineDescriptor, id_in: Input<G, id::ComputePipelineId>, implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>, ) -> ( id::ComputePipelineId, Option<pipeline::CreateComputePipelineError>, )4247     pub fn device_create_compute_pipeline<B: GfxBackend>(
4248         &self,
4249         device_id: id::DeviceId,
4250         desc: &pipeline::ComputePipelineDescriptor,
4251         id_in: Input<G, id::ComputePipelineId>,
4252         implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
4253     ) -> (
4254         id::ComputePipelineId,
4255         Option<pipeline::CreateComputePipelineError>,
4256     ) {
4257         profiling::scope!("create_compute_pipeline", "Device");
4258 
4259         let hub = B::hub(self);
4260         let mut token = Token::root();
4261 
4262         let fid = hub.compute_pipelines.prepare(id_in);
4263         let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(&hub));
4264 
4265         let (device_guard, mut token) = hub.devices.read(&mut token);
4266         let error = loop {
4267             let device = match device_guard.get(device_id) {
4268                 Ok(device) => device,
4269                 Err(_) => break DeviceError::Invalid.into(),
4270             };
4271             #[cfg(feature = "trace")]
4272             if let Some(ref trace) = device.trace {
4273                 trace.lock().add(trace::Action::CreateComputePipeline {
4274                     id: fid.id(),
4275                     desc: desc.clone(),
4276                     implicit_context: implicit_context.clone(),
4277                 });
4278             }
4279 
4280             let pipeline = match device.create_compute_pipeline(
4281                 device_id,
4282                 desc,
4283                 implicit_context,
4284                 &hub,
4285                 &mut token,
4286             ) {
4287                 Ok(pair) => pair,
4288                 Err(e) => break e,
4289             };
4290 
4291             let id = fid.assign(pipeline, &mut token);
4292             return (id.0, None);
4293         };
4294 
4295         let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
4296         (id, Some(error))
4297     }
4298 
4299     /// Get an ID of one of the bind group layouts. The ID adds a refcount,
4300     /// which needs to be released by calling `bind_group_layout_drop`.
compute_pipeline_get_bind_group_layout<B: GfxBackend>( &self, pipeline_id: id::ComputePipelineId, index: u32, id_in: Input<G, id::BindGroupLayoutId>, ) -> ( id::BindGroupLayoutId, Option<binding_model::GetBindGroupLayoutError>, )4301     pub fn compute_pipeline_get_bind_group_layout<B: GfxBackend>(
4302         &self,
4303         pipeline_id: id::ComputePipelineId,
4304         index: u32,
4305         id_in: Input<G, id::BindGroupLayoutId>,
4306     ) -> (
4307         id::BindGroupLayoutId,
4308         Option<binding_model::GetBindGroupLayoutError>,
4309     ) {
4310         let hub = B::hub(self);
4311         let mut token = Token::root();
4312         let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
4313 
4314         let error = loop {
4315             let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
4316             let (_, mut token) = hub.bind_groups.read(&mut token);
4317             let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
4318 
4319             let pipeline = match pipeline_guard.get(pipeline_id) {
4320                 Ok(pipeline) => pipeline,
4321                 Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
4322             };
4323             let id = match pipeline_layout_guard[pipeline.layout_id.value]
4324                 .bind_group_layout_ids
4325                 .get(index as usize)
4326             {
4327                 Some(id) => id,
4328                 None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
4329             };
4330 
4331             bgl_guard[*id].multi_ref_count.inc();
4332             return (id.0, None);
4333         };
4334 
4335         let id = hub
4336             .bind_group_layouts
4337             .prepare(id_in)
4338             .assign_error("<derived>", &mut token);
4339         (id, Some(error))
4340     }
4341 
compute_pipeline_label<B: GfxBackend>(&self, id: id::ComputePipelineId) -> String4342     pub fn compute_pipeline_label<B: GfxBackend>(&self, id: id::ComputePipelineId) -> String {
4343         B::hub(self).compute_pipelines.label_for_resource(id)
4344     }
4345 
compute_pipeline_drop<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId)4346     pub fn compute_pipeline_drop<B: GfxBackend>(&self, compute_pipeline_id: id::ComputePipelineId) {
4347         profiling::scope!("drop", "ComputePipeline");
4348         let hub = B::hub(self);
4349         let mut token = Token::root();
4350         let (device_guard, mut token) = hub.devices.read(&mut token);
4351 
4352         let (device_id, layout_id) = {
4353             let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
4354             match pipeline_guard.get_mut(compute_pipeline_id) {
4355                 Ok(pipeline) => {
4356                     pipeline.life_guard.ref_count.take();
4357                     (pipeline.device_id.value, pipeline.layout_id.clone())
4358                 }
4359                 Err(InvalidId) => {
4360                     hub.compute_pipelines
4361                         .unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
4362                     return;
4363                 }
4364             }
4365         };
4366 
4367         let mut life_lock = device_guard[device_id].lock_life(&mut token);
4368         life_lock
4369             .suspected_resources
4370             .compute_pipelines
4371             .push(id::Valid(compute_pipeline_id));
4372         life_lock
4373             .suspected_resources
4374             .pipeline_layouts
4375             .push(layout_id);
4376     }
4377 
device_create_swap_chain<B: GfxBackend>( &self, device_id: id::DeviceId, surface_id: id::SurfaceId, desc: &wgt::SwapChainDescriptor, ) -> (id::SwapChainId, Option<swap_chain::CreateSwapChainError>)4378     pub fn device_create_swap_chain<B: GfxBackend>(
4379         &self,
4380         device_id: id::DeviceId,
4381         surface_id: id::SurfaceId,
4382         desc: &wgt::SwapChainDescriptor,
4383     ) -> (id::SwapChainId, Option<swap_chain::CreateSwapChainError>) {
4384         profiling::scope!("create_swap_chain", "Device");
4385 
4386         fn validate_swap_chain_descriptor(
4387             config: &mut hal::window::SwapchainConfig,
4388             caps: &hal::window::SurfaceCapabilities,
4389         ) -> Result<(), swap_chain::CreateSwapChainError> {
4390             let width = config.extent.width;
4391             let height = config.extent.height;
4392             if width < caps.extents.start().width
4393                 || width > caps.extents.end().width
4394                 || height < caps.extents.start().height
4395                 || height > caps.extents.end().height
4396             {
4397                 log::warn!(
4398                     "Requested size {}x{} is outside of the supported range: {:?}",
4399                     width,
4400                     height,
4401                     caps.extents
4402                 );
4403             }
4404             if !caps.present_modes.contains(config.present_mode) {
4405                 log::warn!(
4406                     "Surface does not support present mode: {:?}, falling back to {:?}",
4407                     config.present_mode,
4408                     hal::window::PresentMode::FIFO
4409                 );
4410                 config.present_mode = hal::window::PresentMode::FIFO;
4411             }
4412             if width == 0 || height == 0 {
4413                 return Err(swap_chain::CreateSwapChainError::ZeroArea);
4414             }
4415             Ok(())
4416         }
4417 
4418         log::info!("creating swap chain {:?}", desc);
4419         let sc_id = surface_id.to_swap_chain_id(B::VARIANT);
4420         let hub = B::hub(self);
4421         let mut token = Token::root();
4422 
4423         let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
4424         let (adapter_guard, mut token) = hub.adapters.read(&mut token);
4425         let (device_guard, mut token) = hub.devices.read(&mut token);
4426         let (mut swap_chain_guard, _) = hub.swap_chains.write(&mut token);
4427 
4428         let error = loop {
4429             let device = match device_guard.get(device_id) {
4430                 Ok(device) => device,
4431                 Err(_) => break DeviceError::Invalid.into(),
4432             };
4433             #[cfg(feature = "trace")]
4434             if let Some(ref trace) = device.trace {
4435                 trace
4436                     .lock()
4437                     .add(trace::Action::CreateSwapChain(sc_id, desc.clone()));
4438             }
4439 
4440             let surface = match surface_guard.get_mut(surface_id) {
4441                 Ok(surface) => surface,
4442                 Err(_) => break swap_chain::CreateSwapChainError::InvalidSurface,
4443             };
4444 
4445             let (caps, formats) = {
4446                 let surface = B::get_surface_mut(surface);
4447                 let adapter = &adapter_guard[device.adapter_id.value];
4448                 let queue_family = &adapter.raw.queue_families[0];
4449                 if !surface.supports_queue_family(queue_family) {
4450                     break swap_chain::CreateSwapChainError::UnsupportedQueueFamily;
4451                 }
4452                 let formats = surface.supported_formats(&adapter.raw.physical_device);
4453                 let caps = surface.capabilities(&adapter.raw.physical_device);
4454                 (caps, formats)
4455             };
4456 
4457             let num_frames = swap_chain::DESIRED_NUM_FRAMES
4458                 .max(*caps.image_count.start())
4459                 .min(*caps.image_count.end());
4460             let mut config = swap_chain::swap_chain_descriptor_to_hal(
4461                 &desc,
4462                 num_frames,
4463                 device.private_features,
4464             );
4465             if let Some(formats) = formats {
4466                 if !formats.contains(&config.format) {
4467                     break swap_chain::CreateSwapChainError::UnsupportedFormat {
4468                         requested: config.format,
4469                         available: formats,
4470                     };
4471                 }
4472             }
4473             if let Err(error) = validate_swap_chain_descriptor(&mut config, &caps) {
4474                 break error;
4475             }
4476             let framebuffer_attachment = config.framebuffer_attachment();
4477 
4478             match unsafe { B::get_surface_mut(surface).configure_swapchain(&device.raw, config) } {
4479                 Ok(()) => (),
4480                 Err(hal::window::SwapchainError::OutOfMemory(_)) => {
4481                     break DeviceError::OutOfMemory.into()
4482                 }
4483                 Err(hal::window::SwapchainError::DeviceLost(_)) => break DeviceError::Lost.into(),
4484                 Err(err) => panic!("failed to configure swap chain on creation: {}", err),
4485             }
4486 
4487             if let Some(sc) = swap_chain_guard.try_remove(sc_id) {
4488                 if sc.acquired_view_id.is_some() {
4489                     break swap_chain::CreateSwapChainError::SwapChainOutputExists;
4490                 }
4491                 unsafe {
4492                     device.raw.destroy_semaphore(sc.semaphore);
4493                 }
4494             }
4495 
4496             let swap_chain = swap_chain::SwapChain {
4497                 life_guard: LifeGuard::new("<SwapChain>"),
4498                 device_id: Stored {
4499                     value: id::Valid(device_id),
4500                     ref_count: device.life_guard.add_ref(),
4501                 },
4502                 desc: desc.clone(),
4503                 num_frames,
4504                 semaphore: match device.raw.create_semaphore() {
4505                     Ok(sem) => sem,
4506                     Err(_) => break DeviceError::OutOfMemory.into(),
4507                 },
4508                 acquired_view_id: None,
4509                 active_submission_index: 0,
4510                 framebuffer_attachment,
4511             };
4512             swap_chain_guard.insert(sc_id, swap_chain);
4513 
4514             return (sc_id, None);
4515         };
4516 
4517         swap_chain_guard.insert_error(sc_id, "");
4518         (sc_id, Some(error))
4519     }
4520 
4521     #[cfg(feature = "replay")]
    /// Only triage suspected resource IDs. This helps us to avoid ID collisions
4523     /// upon creating new resources when re-playing a trace.
device_maintain_ids<B: GfxBackend>( &self, device_id: id::DeviceId, ) -> Result<(), InvalidDevice>4524     pub fn device_maintain_ids<B: GfxBackend>(
4525         &self,
4526         device_id: id::DeviceId,
4527     ) -> Result<(), InvalidDevice> {
4528         let hub = B::hub(self);
4529         let mut token = Token::root();
4530         let (device_guard, mut token) = hub.devices.read(&mut token);
4531         let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
4532         device.lock_life(&mut token).triage_suspected(
4533             &hub,
4534             &device.trackers,
4535             #[cfg(feature = "trace")]
4536             None,
4537             &mut token,
4538         );
4539         Ok(())
4540     }
4541 
device_poll<B: GfxBackend>( &self, device_id: id::DeviceId, force_wait: bool, ) -> Result<(), WaitIdleError>4542     pub fn device_poll<B: GfxBackend>(
4543         &self,
4544         device_id: id::DeviceId,
4545         force_wait: bool,
4546     ) -> Result<(), WaitIdleError> {
4547         let hub = B::hub(self);
4548         let mut token = Token::root();
4549         let callbacks = {
4550             let (device_guard, mut token) = hub.devices.read(&mut token);
4551             device_guard
4552                 .get(device_id)
4553                 .map_err(|_| DeviceError::Invalid)?
4554                 .maintain(&hub, force_wait, &mut token)?
4555         };
4556         fire_map_callbacks(callbacks);
4557         Ok(())
4558     }
4559 
poll_devices<B: GfxBackend>( &self, force_wait: bool, callbacks: &mut Vec<BufferMapPendingCallback>, ) -> Result<(), WaitIdleError>4560     fn poll_devices<B: GfxBackend>(
4561         &self,
4562         force_wait: bool,
4563         callbacks: &mut Vec<BufferMapPendingCallback>,
4564     ) -> Result<(), WaitIdleError> {
4565         profiling::scope!("poll_devices");
4566 
4567         let hub = B::hub(self);
4568         let mut token = Token::root();
4569         let (device_guard, mut token) = hub.devices.read(&mut token);
4570         for (_, device) in device_guard.iter(B::VARIANT) {
4571             let cbs = device.maintain(&hub, force_wait, &mut token)?;
4572             callbacks.extend(cbs);
4573         }
4574         Ok(())
4575     }
4576 
poll_all_devices(&self, force_wait: bool) -> Result<(), WaitIdleError>4577     pub fn poll_all_devices(&self, force_wait: bool) -> Result<(), WaitIdleError> {
4578         use crate::backend;
4579         let mut callbacks = Vec::new();
4580 
4581         #[cfg(vulkan)]
4582         {
4583             self.poll_devices::<backend::Vulkan>(force_wait, &mut callbacks)?;
4584         }
4585         #[cfg(metal)]
4586         {
4587             self.poll_devices::<backend::Metal>(force_wait, &mut callbacks)?;
4588         }
4589         #[cfg(dx12)]
4590         {
4591             self.poll_devices::<backend::Dx12>(force_wait, &mut callbacks)?;
4592         }
4593         #[cfg(dx11)]
4594         {
4595             self.poll_devices::<backend::Dx11>(force_wait, &mut callbacks)?;
4596         }
4597 
4598         fire_map_callbacks(callbacks);
4599 
4600         Ok(())
4601     }
4602 
device_label<B: GfxBackend>(&self, id: id::DeviceId) -> String4603     pub fn device_label<B: GfxBackend>(&self, id: id::DeviceId) -> String {
4604         B::hub(self).devices.label_for_resource(id)
4605     }
4606 
device_start_capture<B: GfxBackend>(&self, id: id::DeviceId)4607     pub fn device_start_capture<B: GfxBackend>(&self, id: id::DeviceId) {
4608         let hub = B::hub(self);
4609         let mut token = Token::root();
4610         let (device_guard, _) = hub.devices.read(&mut token);
4611         if let Ok(device) = device_guard.get(id) {
4612             device.raw.start_capture();
4613         }
4614     }
4615 
device_stop_capture<B: GfxBackend>(&self, id: id::DeviceId)4616     pub fn device_stop_capture<B: GfxBackend>(&self, id: id::DeviceId) {
4617         let hub = B::hub(self);
4618         let mut token = Token::root();
4619         let (device_guard, _) = hub.devices.read(&mut token);
4620         if let Ok(device) = device_guard.get(id) {
4621             device.raw.stop_capture();
4622         }
4623     }
4624 
device_drop<B: GfxBackend>(&self, device_id: id::DeviceId)4625     pub fn device_drop<B: GfxBackend>(&self, device_id: id::DeviceId) {
4626         profiling::scope!("drop", "Device");
4627 
4628         let hub = B::hub(self);
4629         let mut token = Token::root();
4630         let (device, _) = hub.devices.unregister(device_id, &mut token);
4631         if let Some(mut device) = device {
4632             device.prepare_to_die();
4633 
4634             // Adapter is only referenced by the device and itself.
4635             // This isn't a robust way to destroy them, we should find a better one.
4636             if device.adapter_id.ref_count.load() == 1 {
4637                 let (_adapter, _) = hub
4638                     .adapters
4639                     .unregister(device.adapter_id.value.0, &mut token);
4640             }
4641 
4642             device.dispose();
4643         }
4644     }
4645 
buffer_map_async<B: GfxBackend>( &self, buffer_id: id::BufferId, range: Range<BufferAddress>, op: resource::BufferMapOperation, ) -> Result<(), resource::BufferAccessError>4646     pub fn buffer_map_async<B: GfxBackend>(
4647         &self,
4648         buffer_id: id::BufferId,
4649         range: Range<BufferAddress>,
4650         op: resource::BufferMapOperation,
4651     ) -> Result<(), resource::BufferAccessError> {
4652         profiling::scope!("map_async", "Buffer");
4653 
4654         let hub = B::hub(self);
4655         let mut token = Token::root();
4656         let (device_guard, mut token) = hub.devices.read(&mut token);
4657         let (pub_usage, internal_use) = match op.host {
4658             HostMap::Read => (wgt::BufferUsage::MAP_READ, resource::BufferUse::MAP_READ),
4659             HostMap::Write => (wgt::BufferUsage::MAP_WRITE, resource::BufferUse::MAP_WRITE),
4660         };
4661 
4662         if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
4663             return Err(resource::BufferAccessError::UnalignedRange);
4664         }
4665 
4666         let (device_id, ref_count) = {
4667             let (mut buffer_guard, _) = hub.buffers.write(&mut token);
4668             let buffer = buffer_guard
4669                 .get_mut(buffer_id)
4670                 .map_err(|_| resource::BufferAccessError::Invalid)?;
4671 
4672             check_buffer_usage(buffer.usage, pub_usage)?;
4673             buffer.map_state = match buffer.map_state {
4674                 resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
4675                     return Err(resource::BufferAccessError::AlreadyMapped);
4676                 }
4677                 resource::BufferMapState::Waiting(_) => {
4678                     op.call_error();
4679                     return Ok(());
4680                 }
4681                 resource::BufferMapState::Idle => {
4682                     resource::BufferMapState::Waiting(resource::BufferPendingMapping {
4683                         range,
4684                         op,
4685                         parent_ref_count: buffer.life_guard.add_ref(),
4686                     })
4687                 }
4688             };
4689             log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
4690 
4691             (buffer.device_id.value, buffer.life_guard.add_ref())
4692         };
4693 
4694         let device = &device_guard[device_id];
4695         device.trackers.lock().buffers.change_replace(
4696             id::Valid(buffer_id),
4697             &ref_count,
4698             (),
4699             internal_use,
4700         );
4701 
4702         device
4703             .lock_life(&mut token)
4704             .map(id::Valid(buffer_id), ref_count);
4705 
4706         Ok(())
4707     }
4708 
buffer_get_mapped_range<B: GfxBackend>( &self, buffer_id: id::BufferId, offset: BufferAddress, size: Option<BufferAddress>, ) -> Result<(*mut u8, u64), resource::BufferAccessError>4709     pub fn buffer_get_mapped_range<B: GfxBackend>(
4710         &self,
4711         buffer_id: id::BufferId,
4712         offset: BufferAddress,
4713         size: Option<BufferAddress>,
4714     ) -> Result<(*mut u8, u64), resource::BufferAccessError> {
4715         profiling::scope!("get_mapped_range", "Buffer");
4716 
4717         let hub = B::hub(self);
4718         let mut token = Token::root();
4719         let (buffer_guard, _) = hub.buffers.read(&mut token);
4720         let buffer = buffer_guard
4721             .get(buffer_id)
4722             .map_err(|_| resource::BufferAccessError::Invalid)?;
4723 
4724         let range_size = if let Some(size) = size {
4725             size
4726         } else if offset > buffer.size {
4727             0
4728         } else {
4729             buffer.size - offset
4730         };
4731 
4732         if offset % wgt::MAP_ALIGNMENT != 0 {
4733             return Err(resource::BufferAccessError::UnalignedOffset { offset });
4734         }
4735         if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
4736             return Err(resource::BufferAccessError::UnalignedRangeSize { range_size });
4737         }
4738 
4739         match buffer.map_state {
4740             resource::BufferMapState::Init { ptr, .. } => {
4741                 // offset (u64) can not be < 0, so no need to validate the lower bound
4742                 if offset + range_size > buffer.size {
4743                     return Err(resource::BufferAccessError::OutOfBoundsOverrun {
4744                         index: offset + range_size - 1,
4745                         max: buffer.size,
4746                     });
4747                 }
4748                 unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
4749             }
4750             resource::BufferMapState::Active {
4751                 ptr, ref sub_range, ..
4752             } => {
4753                 if offset < sub_range.offset {
4754                     return Err(resource::BufferAccessError::OutOfBoundsUnderrun {
4755                         index: offset,
4756                         min: sub_range.offset,
4757                     });
4758                 }
4759                 let range_end_offset = sub_range
4760                     .size
4761                     .map(|size| size + sub_range.offset)
4762                     .unwrap_or(buffer.size);
4763                 if offset + range_size > range_end_offset {
4764                     return Err(resource::BufferAccessError::OutOfBoundsOverrun {
4765                         index: offset + range_size - 1,
4766                         max: range_end_offset,
4767                     });
4768                 }
4769                 unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
4770             }
4771             resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
4772                 Err(resource::BufferAccessError::NotMapped)
4773             }
4774         }
4775     }
4776 
    /// Core implementation of buffer unmapping.
    ///
    /// Resets the buffer's `map_state` to `Idle` and performs the work owed by
    /// the state being left:
    ///
    /// * `Init` — records a GPU copy from the mapping's staging buffer into the
    ///   real buffer on the device's pending-writes command buffer, then hands
    ///   the staging buffer/memory to `pending_writes` for deferred cleanup.
    /// * `Waiting` — a `map_async` request that never completed is aborted; its
    ///   callback is *returned* (with `Aborted` status) rather than fired here.
    /// * `Active` — unmaps the live host mapping via `unmap_buffer`.
    /// * `Idle` — the buffer was not mapped: `BufferAccessError::NotMapped`.
    ///
    /// Returns `Ok(Some(_))` only in the `Waiting` case. The caller must fire
    /// the callback *after* this function returns, so that no hub locks are
    /// held while user code runs (see `buffer_unmap`).
    fn buffer_unmap_inner<B: GfxBackend>(
        &self,
        buffer_id: id::BufferId,
    ) -> Result<Option<BufferMapPendingCallback>, resource::BufferAccessError> {
        profiling::scope!("unmap", "Buffer");

        let hub = B::hub(self);
        let mut token = Token::root();

        // Lock order: devices before buffers, as enforced by the token chain.
        let (mut device_guard, mut token) = hub.devices.write(&mut token);
        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
        let buffer = buffer_guard
            .get_mut(buffer_id)
            .map_err(|_| resource::BufferAccessError::Invalid)?;
        let device = &mut device_guard[buffer.device_id.value];

        log::debug!("Buffer {:?} map state -> Idle", buffer_id);
        match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
            // Buffer was created `mapped_at_creation`: its contents live in a
            // staging buffer that now has to be copied into the real one.
            resource::BufferMapState::Init {
                ptr,
                stage_buffer,
                stage_memory,
                needs_flush,
            } => {
                #[cfg(feature = "trace")]
                if let Some(ref trace) = device.trace {
                    let mut trace = trace.lock();
                    // SAFETY-note: `ptr` is the still-valid host mapping of the
                    // staging memory, covering the full `buffer.size` bytes.
                    let data = trace.make_binary("bin", unsafe {
                        std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
                    });
                    trace.add(trace::Action::WriteBuffer {
                        id: buffer_id,
                        data,
                        range: 0..buffer.size,
                        queued: true,
                    });
                }
                // Silence the unused-variable warning when "trace" is disabled.
                let _ = ptr;

                if needs_flush {
                    // Non-coherent memory: make the CPU writes visible to the
                    // device before the copy is recorded.
                    stage_memory.flush_range(&device.raw, 0, None)?;
                }

                let &(ref buf_raw, _) = buffer
                    .raw
                    .as_ref()
                    .ok_or(resource::BufferAccessError::Destroyed)?;

                // Keep the buffer alive at least until the submission that
                // will carry this pending copy.
                buffer.life_guard.use_at(device.active_submission_index + 1);
                let region = hal::command::BufferCopy {
                    src: 0,
                    dst: 0,
                    size: buffer.size,
                };
                // Host writes on the staging buffer must be visible to the
                // transfer read...
                let transition_src = hal::memory::Barrier::Buffer {
                    states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
                    target: &stage_buffer,
                    range: hal::buffer::SubRange::WHOLE,
                    families: None,
                };
                // ...and the destination buffer must be brought into the
                // transfer-write state.
                let transition_dst = hal::memory::Barrier::Buffer {
                    states: hal::buffer::Access::empty()..hal::buffer::Access::TRANSFER_WRITE,
                    target: buf_raw,
                    range: hal::buffer::SubRange::WHOLE,
                    families: None,
                };
                unsafe {
                    let cmdbuf = device.borrow_pending_writes();
                    cmdbuf.pipeline_barrier(
                        hal::pso::PipelineStage::HOST..hal::pso::PipelineStage::TRANSFER,
                        hal::memory::Dependencies::empty(),
                        iter::once(transition_src).chain(iter::once(transition_dst)),
                    );
                    // Zero-sized copies are not valid commands; skip them.
                    if buffer.size > 0 {
                        cmdbuf.copy_buffer(&stage_buffer, buf_raw, iter::once(region));
                    }
                }
                // Defer destruction of the staging resources until the pending
                // writes have been submitted and completed.
                device
                    .pending_writes
                    .consume_temp(queue::TempResource::Buffer(stage_buffer), stage_memory);
                device.pending_writes.dst_buffers.insert(buffer_id);
            }
            resource::BufferMapState::Idle => {
                return Err(resource::BufferAccessError::NotMapped);
            }
            // The map request never resolved; hand the callback back to the
            // caller to be fired (as Aborted) once all locks are dropped.
            resource::BufferMapState::Waiting(pending) => {
                return Ok(Some((pending.op, resource::BufferMapAsyncStatus::Aborted)));
            }
            resource::BufferMapState::Active {
                ptr,
                sub_range,
                host,
            } => {
                // Only writable mappings can have modified the contents, so
                // only those need to be recorded in the trace.
                if host == HostMap::Write {
                    #[cfg(feature = "trace")]
                    if let Some(ref trace) = device.trace {
                        let mut trace = trace.lock();
                        let size = sub_range.size_to(buffer.size);
                        // SAFETY-note: `ptr` is the live host mapping of this
                        // sub-range, valid for `size` bytes.
                        let data = trace.make_binary("bin", unsafe {
                            std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
                        });
                        trace.add(trace::Action::WriteBuffer {
                            id: buffer_id,
                            data,
                            range: sub_range.offset..sub_range.offset + size,
                            queued: false,
                        });
                    }
                    // Silence unused-variable warnings when "trace" is disabled.
                    let _ = (ptr, sub_range);
                }
                unmap_buffer(&device.raw, buffer)?;
            }
        }
        Ok(None)
    }
4892 
buffer_unmap<B: GfxBackend>( &self, buffer_id: id::BufferId, ) -> Result<(), resource::BufferAccessError>4893     pub fn buffer_unmap<B: GfxBackend>(
4894         &self,
4895         buffer_id: id::BufferId,
4896     ) -> Result<(), resource::BufferAccessError> {
4897         self.buffer_unmap_inner::<B>(buffer_id)
4898             //Note: outside inner function so no locks are held when calling the callback
4899             .map(|pending_callback| fire_map_callbacks(pending_callback.into_iter()))
4900     }
4901 }
4902