/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
    device::{
        alloc,
        descriptor::{DescriptorAllocator, DescriptorSet},
        queue::TempResource,
        DeviceError,
    },
    hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
    id, resource,
    track::TrackerSet,
    RefCount, Stored, SubmissionIndex,
};

use copyless::VecHelper as _;
use hal::device::Device as _;
use parking_lot::Mutex;
use thiserror::Error;

use std::sync::atomic::Ordering;

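/// Number of milliseconds `wait_idle` waits for in-flight submissions before
/// giving up and reporting `WaitIdleError::StuckGpu`.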
const CLEANUP_WAIT_MS: u64 = 5000;

/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub(super) struct SuspectedResources {
    pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
    pub(crate) textures: Vec<id::Valid<id::TextureId>>,
    pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
    pub(crate) samplers: Vec<id::Valid<id::SamplerId>>,
    pub(crate) bind_groups: Vec<id::Valid<id::BindGroupId>>,
    pub(crate) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
    pub(crate) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
    pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
    pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
    pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
    pub(crate) query_sets: Vec<id::Valid<id::QuerySetId>>,
}

impl SuspectedResources {
    pub(super) fn clear(&mut self) {
        self.buffers.clear();
        self.textures.clear();
        self.texture_views.clear();
        self.samplers.clear();
        self.bind_groups.clear();
        self.compute_pipelines.clear();
        self.render_pipelines.clear();
        self.bind_group_layouts.clear();
        self.pipeline_layouts.clear();
        self.render_bundles.clear();
        self.query_sets.clear();
    }

    pub(super) fn extend(&mut self, other: &Self) {
        self.buffers.extend_from_slice(&other.buffers);
        self.textures.extend_from_slice(&other.textures);
        self.texture_views.extend_from_slice(&other.texture_views);
        self.samplers.extend_from_slice(&other.samplers);
        self.bind_groups.extend_from_slice(&other.bind_groups);
        self.compute_pipelines
            .extend_from_slice(&other.compute_pipelines);
        self.render_pipelines
            .extend_from_slice(&other.render_pipelines);
        self.bind_group_layouts
            .extend_from_slice(&other.bind_group_layouts);
        self.pipeline_layouts
            .extend_from_slice(&other.pipeline_layouts);
        self.render_bundles.extend_from_slice(&other.render_bundles);
        self.query_sets.extend_from_slice(&other.query_sets);
    }

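    /// Add to the suspected lists every resource currently recorded as used
    /// by the given tracker set.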
    pub(super) fn add_trackers(&mut self, trackers: &TrackerSet) {
        self.buffers.extend(trackers.buffers.used());
        self.textures.extend(trackers.textures.used());
        self.texture_views.extend(trackers.views.used());
        self.samplers.extend(trackers.samplers.used());
        self.bind_groups.extend(trackers.bind_groups.used());
        self.compute_pipelines.extend(trackers.compute_pipes.used());
        self.render_pipelines.extend(trackers.render_pipes.used());
        self.render_bundles.extend(trackers.bundles.used());
        self.query_sets.extend(trackers.query_sets.used());
    }
}

/// A struct that keeps lists of raw backend resources that are no longer needed and await destruction.
#[derive(Debug)]
struct NonReferencedResources<B: hal::Backend> {
    buffers: Vec<(B::Buffer, alloc::MemoryBlock<B>)>,
    images: Vec<(B::Image, alloc::MemoryBlock<B>)>,
    // Note: we keep the associated ID here in order to be able to check
    // at any point what resources are used in a submission.
    image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
    samplers: Vec<B::Sampler>,
    framebuffers: Vec<B::Framebuffer>,
    desc_sets: Vec<DescriptorSet<B>>,
    compute_pipes: Vec<B::ComputePipeline>,
    graphics_pipes: Vec<B::GraphicsPipeline>,
    descriptor_set_layouts: Vec<B::DescriptorSetLayout>,
    pipeline_layouts: Vec<B::PipelineLayout>,
    query_sets: Vec<B::QueryPool>,
}

impl<B: hal::Backend> NonReferencedResources<B> {
    fn new() -> Self {
        Self {
            buffers: Vec::new(),
            images: Vec::new(),
            image_views: Vec::new(),
            samplers: Vec::new(),
            framebuffers: Vec::new(),
            desc_sets: Vec::new(),
            compute_pipes: Vec::new(),
            graphics_pipes: Vec::new(),
            descriptor_set_layouts: Vec::new(),
            pipeline_layouts: Vec::new(),
            query_sets: Vec::new(),
        }
    }

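    /// Merge all resources from `other` into `self`.
    ///
    /// Layouts are only ever pushed directly onto `free_resources` (see
    /// `triage_suspected`), so `other` is expected to carry none of them.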
    fn extend(&mut self, other: Self) {
        self.buffers.extend(other.buffers);
        self.images.extend(other.images);
        self.image_views.extend(other.image_views);
        self.samplers.extend(other.samplers);
        self.framebuffers.extend(other.framebuffers);
        self.desc_sets.extend(other.desc_sets);
        self.compute_pipes.extend(other.compute_pipes);
        self.graphics_pipes.extend(other.graphics_pipes);
        self.query_sets.extend(other.query_sets);
        assert!(other.descriptor_set_layouts.is_empty());
        assert!(other.pipeline_layouts.is_empty());
    }

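    /// Destroy every raw resource held in these lists and return its memory
    /// and descriptor sets to the shared allocators.
    ///
    /// Safety: the device must no longer be using any of these resources.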
    unsafe fn clean(
        &mut self,
        device: &B::Device,
        memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
        descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
    ) {
        if !self.buffers.is_empty() || !self.images.is_empty() {
            let mut allocator = memory_allocator_mutex.lock();
            for (raw, memory) in self.buffers.drain(..) {
                log::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
                device.destroy_buffer(raw);
                allocator.free(device, memory);
            }
            for (raw, memory) in self.images.drain(..) {
                log::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
                device.destroy_image(raw);
                allocator.free(device, memory);
            }
        }

        for (_, raw) in self.image_views.drain(..) {
            device.destroy_image_view(raw);
        }
        for raw in self.samplers.drain(..) {
            device.destroy_sampler(raw);
        }
        for raw in self.framebuffers.drain(..) {
            device.destroy_framebuffer(raw);
        }

        if !self.desc_sets.is_empty() {
            descriptor_allocator_mutex
                .lock()
                .free(device, self.desc_sets.drain(..));
        }

        for raw in self.compute_pipes.drain(..) {
            device.destroy_compute_pipeline(raw);
        }
        for raw in self.graphics_pipes.drain(..) {
            device.destroy_graphics_pipeline(raw);
        }
        for raw in self.descriptor_set_layouts.drain(..) {
            device.destroy_descriptor_set_layout(raw);
        }
        for raw in self.pipeline_layouts.drain(..) {
            device.destroy_pipeline_layout(raw);
        }
        for raw in self.query_sets.drain(..) {
            device.destroy_query_pool(raw);
        }
    }
}

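/// Resources used by a queue submission that may still be in flight on the GPU,
/// along with the fence that signals its completion.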
#[derive(Debug)]
struct ActiveSubmission<B: hal::Backend> {
    index: SubmissionIndex,
    fence: B::Fence,
    last_resources: NonReferencedResources<B>,
    mapped: Vec<id::Valid<id::BufferId>>,
}

#[derive(Clone, Debug, Error)]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("GPU got stuck :(")]
    StuckGpu,
}

/// A struct responsible for tracking resource lifetimes.
///
/// Here is how host mapping is handled:
///   1. When mapping is requested, we add the buffer to the life tracker's list of `mapped` buffers.
///   2. When `triage_mapped` is called, it checks the last submission index associated with each of the
///      mapped buffers, and registers the buffer either with a submission still in flight, or straight
///      in the `ready_to_map` vector.
///   3. When an `ActiveSubmission` is retired, the mapped buffers associated with it are moved to the
///      `ready_to_map` vector.
///   4. Finally, `handle_mapping` issues all the callbacks.
#[derive(Debug)]
pub(super) struct LifetimeTracker<B: hal::Backend> {
    /// Resources that the user has requested be mapped, but are still in use.
    mapped: Vec<Stored<id::BufferId>>,
    /// Buffers can be used in a submission that is yet to be made, by
    /// means of `write_buffer()`, so we have a special place for them.
    pub future_suspected_buffers: Vec<Stored<id::BufferId>>,
    /// Textures can be used in the upcoming submission by `write_texture`.
    pub future_suspected_textures: Vec<Stored<id::TextureId>>,
    /// Resources that are suspected for destruction.
    pub suspected_resources: SuspectedResources,
    /// Resources that are no longer referenced but still used by the GPU.
    /// Grouped by submissions associated with a fence and a submission index.
    /// The active submissions have to be stored in FIFO order: oldest come first.
    active: Vec<ActiveSubmission<B>>,
    /// Resources that are neither referenced nor used, just awaiting
    /// actual deletion.
    free_resources: NonReferencedResources<B>,
    ready_to_map: Vec<id::Valid<id::BufferId>>,
}

impl<B: hal::Backend> LifetimeTracker<B> {
    pub fn new() -> Self {
        Self {
            mapped: Vec::new(),
            future_suspected_buffers: Vec::new(),
            future_suspected_textures: Vec::new(),
            suspected_resources: SuspectedResources::default(),
            active: Vec::new(),
            free_resources: NonReferencedResources::new(),
            ready_to_map: Vec::new(),
        }
    }

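    /// Start tracking a new queue submission: remember its fence, keep the
    /// given temporary resources alive until it completes, and fold the
    /// future suspects and `new_suspects` into `suspected_resources`.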
    pub fn track_submission(
        &mut self,
        index: SubmissionIndex,
        fence: B::Fence,
        new_suspects: &SuspectedResources,
        temp_resources: impl Iterator<Item = (TempResource<B>, alloc::MemoryBlock<B>)>,
    ) {
        let mut last_resources = NonReferencedResources::new();
        for (res, memory) in temp_resources {
            match res {
                TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
                TempResource::Image(raw) => last_resources.images.push((raw, memory)),
            }
        }

        self.suspected_resources.buffers.extend(
            self.future_suspected_buffers
                .drain(..)
                .map(|stored| stored.value),
        );
        self.suspected_resources.textures.extend(
            self.future_suspected_textures
                .drain(..)
                .map(|stored| stored.value),
        );
        self.suspected_resources.extend(new_suspects);

        self.active.alloc().init(ActiveSubmission {
            index,
            fence,
            last_resources,
            mapped: Vec::new(),
        });
    }

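    /// Remember that a mapping of this buffer has been requested.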
    pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
        self.mapped.push(Stored { value, ref_count });
    }

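    /// Block until every active submission has signaled its fence, giving up
    /// after `CLEANUP_WAIT_MS` and reporting the GPU as stuck.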
    fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
        if !self.active.is_empty() {
            log::debug!("Waiting for IDLE...");
            let status = unsafe {
                device
                    .wait_for_fences(
                        self.active.iter().map(|a| &a.fence),
                        hal::device::WaitFor::All,
                        CLEANUP_WAIT_MS * 1_000_000,
                    )
                    .map_err(DeviceError::from)?
            };
            log::debug!("...Done");

            if !status {
                // We timed out while waiting for the fences
                return Err(WaitIdleError::StuckGpu);
            }
        }
        Ok(())
    }

    /// Returns the last submission index that is done.
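    ///
    /// Checks the fences of the active submissions in order, retires every
    /// submission whose fence has signaled (moving its resources to
    /// `free_resources` and its mapped buffers to `ready_to_map`), and
    /// destroys the retired fences. If `force_wait` is set, first waits for
    /// all active submissions to finish.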
    pub fn triage_submissions(
        &mut self,
        device: &B::Device,
        force_wait: bool,
    ) -> Result<SubmissionIndex, WaitIdleError> {
        profiling::scope!("triage_submissions");
        if force_wait {
            self.wait_idle(device)?;
        }
        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
            .unwrap_or_else(|| self.active.len());
        let last_done = match done_count.checked_sub(1) {
            Some(i) => self.active[i].index,
            None => return Ok(0),
        };

        for a in self.active.drain(..done_count) {
            log::trace!("Active submission {} is done", a.index);
            self.free_resources.extend(a.last_resources);
            self.ready_to_map.extend(a.mapped);
            unsafe {
                device.destroy_fence(a.fence);
            }
        }

        Ok(last_done)
    }

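    /// Free all resources that are ready for deletion and let the descriptor
    /// allocator reclaim its unused pools.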
    pub fn cleanup(
        &mut self,
        device: &B::Device,
        memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<B>>,
        descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
    ) {
        profiling::scope!("cleanup");
        unsafe {
            self.free_resources
                .clean(device, memory_allocator_mutex, descriptor_allocator_mutex);
            descriptor_allocator_mutex.lock().cleanup(device);
        }
    }

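    /// Queue a temporary resource for destruction once the submission with
    /// `last_submit_index` retires; if that submission is no longer active,
    /// the resource is freed on the next `cleanup` instead.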
    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource<B>,
        memory: alloc::MemoryBlock<B>,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map_or(&mut self.free_resources, |a| &mut a.last_resources);
        match temp_resource {
            TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
            TempResource::Image(raw) => resources.images.push((raw, memory)),
        }
    }
}

impl<B: GfxBackend> LifetimeTracker<B> {
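    /// Walk the suspected resource lists, unregister every resource whose
    /// last user-facing reference is gone, and hand its raw handle either to
    /// the active submission that still uses it or to `free_resources`.
    ///
    /// Dropping a bind group, render bundle, or pipeline layout in turn
    /// suspects the resources and layouts it was keeping alive.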
    pub(super) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<B, G>,
        trackers: &Mutex<TrackerSet>,
        #[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
        token: &mut Token<super::Device<B>>,
    ) {
        profiling::scope!("triage_suspected");

        if !self.suspected_resources.render_bundles.is_empty() {
            let (mut guard, _) = hub.render_bundles.write(token);
            let mut trackers = trackers.lock();

            while let Some(id) = self.suspected_resources.render_bundles.pop() {
                if trackers.bundles.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyRenderBundle(id.0));
                    }

                    if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources.add_trackers(&res.used);
                    }
                }
            }
        }

        if !self.suspected_resources.bind_groups.is_empty() {
            let (mut guard, _) = hub.bind_groups.write(token);
            let mut trackers = trackers.lock();

            while let Some(id) = self.suspected_resources.bind_groups.pop() {
                if trackers.bind_groups.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyBindGroup(id.0));
                    }

                    if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources.add_trackers(&res.used);

                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .desc_sets
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.texture_views.is_empty() {
            let (mut guard, _) = hub.texture_views.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.texture_views.drain(..) {
                if trackers.views.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyTextureView(id.0));
                    }

                    if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
                        let raw = match res.inner {
                            resource::TextureViewInner::Native { raw, source_id } => {
                                self.suspected_resources.textures.push(source_id.value);
                                raw
                            }
                            resource::TextureViewInner::SwapChain { .. } => unreachable!(),
                        };

                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .image_views
                            .push((id, raw));
                    }
                }
            }
        }

        if !self.suspected_resources.textures.is_empty() {
            let (mut guard, _) = hub.textures.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.textures.drain(..) {
                if trackers.textures.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyTexture(id.0));
                    }

                    if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .images
                            .extend(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.samplers.is_empty() {
            let (mut guard, _) = hub.samplers.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.samplers.drain(..) {
                if trackers.samplers.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroySampler(id.0));
                    }

                    if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .samplers
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.buffers.is_empty() {
            let (mut guard, _) = hub.buffers.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.buffers.drain(..) {
                if trackers.buffers.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyBuffer(id.0));
                    }
                    log::debug!("Buffer {:?} is detached", id);

                    if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        if let resource::BufferMapState::Init {
                            stage_buffer,
                            stage_memory,
                            ..
                        } = res.map_state
                        {
                            self.free_resources
                                .buffers
                                .push((stage_buffer, stage_memory));
                        }
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .buffers
                            .extend(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.compute_pipelines.is_empty() {
            let (mut guard, _) = hub.compute_pipelines.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.compute_pipelines.drain(..) {
                if trackers.compute_pipes.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyComputePipeline(id.0));
                    }

                    if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .compute_pipes
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.render_pipelines.is_empty() {
            let (mut guard, _) = hub.render_pipelines.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.render_pipelines.drain(..) {
                if trackers.render_pipes.remove_abandoned(id) {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
                    }

                    if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .graphics_pipes
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.pipeline_layouts.is_empty() {
            let (mut guard, _) = hub.pipeline_layouts.write(token);

            for Stored {
                value: id,
                ref_count,
            } in self.suspected_resources.pipeline_layouts.drain(..)
            {
                //Note: this has to happen after all the suspected pipelines are destroyed
                if ref_count.load() == 1 {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
                    }

                    if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources
                            .bind_group_layouts
                            .extend_from_slice(&lay.bind_group_layout_ids);
                        self.free_resources.pipeline_layouts.push(lay.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.bind_group_layouts.is_empty() {
            let (mut guard, _) = hub.bind_group_layouts.write(token);

            for id in self.suspected_resources.bind_group_layouts.drain(..) {
                //Note: this has to happen after all the suspected pipelines are destroyed
                //Note: nothing else can bump the refcount since the guard is locked exclusively
                //Note: same BGL can appear multiple times in the list, but only the last
                // encounter could drop the refcount to 0.
                if guard[id].multi_ref_count.dec_and_check_empty() {
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
                    }
                    if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
                        self.free_resources.descriptor_set_layouts.push(lay.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.query_sets.is_empty() {
            let (mut guard, _) = hub.query_sets.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.query_sets.drain(..) {
                if trackers.query_sets.remove_abandoned(id) {
                    // #[cfg(feature = "trace")]
                    // trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
                    if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .query_sets
                            .push(res.raw);
                    }
                }
            }
        }
    }

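    /// Assign every pending mapping request either to the active submission
    /// that still uses the buffer, or straight to `ready_to_map` if no such
    /// submission remains.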
    pub(super) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<B, G>,
        token: &mut Token<super::Device<B>>,
    ) {
        if self.mapped.is_empty() {
            return;
        }
        let (buffer_guard, _) = hub.buffers.read(token);

        for stored in self.mapped.drain(..) {
            let resource_id = stored.value;
            let buf = &buffer_guard[resource_id];

            let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
            log::trace!(
                "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
                resource_id,
                submit_index,
                self.active.iter().position(|a| a.index == submit_index)
            );

            self.active
                .iter_mut()
                .find(|a| a.index == submit_index)
                .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
                .push(resource_id);
        }
    }

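    /// Map the buffers in `ready_to_map` on the host and return the list of
    /// callbacks to fire, one per mapping request. Buffers that have been
    /// destroyed in the meantime are scheduled for freeing instead.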
    pub(super) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<B, G>,
        raw: &B::Device,
        trackers: &Mutex<TrackerSet>,
        token: &mut Token<super::Device<B>>,
    ) -> Vec<super::BufferMapPendingCallback> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let (mut buffer_guard, _) = hub.buffers.write(token);
        let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
            Vec::with_capacity(self.ready_to_map.len());
        let mut trackers = trackers.lock();
        for buffer_id in self.ready_to_map.drain(..) {
            let buffer = &mut buffer_guard[buffer_id];
            if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
            {
                buffer.map_state = resource::BufferMapState::Idle;
                log::debug!("Mapping request is dropped because the buffer is destroyed.");
                if let Some(buf) = hub
                    .buffers
                    .unregister_locked(buffer_id.0, &mut *buffer_guard)
                {
                    self.free_resources.buffers.extend(buf.raw);
                }
            } else {
                let mapping = match std::mem::replace(
                    &mut buffer.map_state,
                    resource::BufferMapState::Idle,
                ) {
                    resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
                    // Mapping cancelled
                    resource::BufferMapState::Idle => continue,
                    // Mapping queued at least twice by map -> unmap -> map
                    // and was already successfully mapped below
                    active @ resource::BufferMapState::Active { .. } => {
                        buffer.map_state = active;
                        continue;
                    }
                    _ => panic!("No pending mapping."),
                };
                let status = if mapping.range.start != mapping.range.end {
                    log::debug!("Buffer {:?} map state -> Active", buffer_id);
                    let host = mapping.op.host;
                    let size = mapping.range.end - mapping.range.start;
                    match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
                        Ok(ptr) => {
                            buffer.map_state = resource::BufferMapState::Active {
                                ptr,
                                sub_range: hal::buffer::SubRange {
                                    offset: mapping.range.start,
                                    size: Some(size),
                                },
                                host,
                            };
                            resource::BufferMapAsyncStatus::Success
                        }
                        Err(e) => {
                            log::error!("Mapping failed {:?}", e);
                            resource::BufferMapAsyncStatus::Error
                        }
                    }
                } else {
                    resource::BufferMapAsyncStatus::Success
                };
                pending_callbacks.push((mapping.op, status));
            }
        }
        pending_callbacks
    }
}