1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 #[cfg(feature = "local")]
6 use crate::instance::Limits;
7 use crate::{
8 binding_model,
9 command,
10 conv,
11 hub::{GfxBackend, Global, Token},
12 id::{Input, Output},
13 pipeline,
14 resource,
15 swap_chain,
16 track::{Stitch, TrackerSet},
17 AdapterId,
18 BindGroupId,
19 BindGroupLayoutId,
20 BufferAddress,
21 BufferId,
22 BufferMapAsyncStatus,
23 BufferMapOperation,
24 CommandBufferId,
25 CommandEncoderId,
26 ComputePipelineId,
27 DeviceId,
28 FastHashMap,
29 Features,
30 LifeGuard,
31 PipelineLayoutId,
32 QueueId,
33 RefCount,
34 RenderPipelineId,
35 SamplerId,
36 ShaderModuleId,
37 Stored,
38 SubmissionIndex,
39 SurfaceId,
40 SwapChainId,
41 TextureDimension,
42 TextureFormat,
43 TextureId,
44 TextureUsage,
45 TextureViewId,
46 };
47 #[cfg(feature = "local")]
48 use crate::{gfx_select, hub::GLOBAL};
49
50 use arrayvec::ArrayVec;
51 use copyless::VecHelper as _;
52 use hal::{
53 self,
54 command::CommandBuffer as _,
55 device::Device as _,
56 queue::CommandQueue as _,
57 window::{PresentationSurface as _, Surface as _},
58 };
59 use parking_lot::Mutex;
60 use rendy_descriptor::{DescriptorAllocator, DescriptorRanges, DescriptorSet};
61 use rendy_memory::{Block, Heaps, MemoryBlock};
62
63 #[cfg(feature = "local")]
64 use std::marker::PhantomData;
65 use std::{
66 collections::hash_map::Entry,
67 ffi,
68 iter,
69 ops::Range,
70 ptr,
71 slice,
72 sync::atomic::Ordering,
73 };
74
75
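/// How long `PendingResources::cleanup` will block on in-flight fences, in milliseconds,
/// before assuming the GPU is stuck. (Descriptive comment added for clarity.)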
76 const CLEANUP_WAIT_MS: u64 = 5000;
77 pub const MAX_COLOR_TARGETS: usize = 4;
78 pub const MAX_MIP_LEVELS: usize = 16;
79 pub const MAX_VERTEX_BUFFERS: usize = 8;
80
81 /// Bound uniform/storage buffer offsets must be aligned to this number.
82 pub const BIND_BUFFER_ALIGNMENT: hal::buffer::Offset = 256;
83
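/// The union of pipeline stages that can access a buffer, used when a conservative
/// barrier over all possible buffer usages is needed.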
84 pub fn all_buffer_stages() -> hal::pso::PipelineStage {
85 use hal::pso::PipelineStage as Ps;
86 Ps::DRAW_INDIRECT
87 | Ps::VERTEX_INPUT
88 | Ps::VERTEX_SHADER
89 | Ps::FRAGMENT_SHADER
90 | Ps::COMPUTE_SHADER
91 | Ps::TRANSFER
92 | Ps::HOST
93 }
94 pub fn all_image_stages() -> hal::pso::PipelineStage {
95 use hal::pso::PipelineStage as Ps;
96 Ps::EARLY_FRAGMENT_TESTS
97 | Ps::LATE_FRAGMENT_TESTS
98 | Ps::COLOR_ATTACHMENT_OUTPUT
99 | Ps::VERTEX_SHADER
100 | Ps::FRAGMENT_SHADER
101 | Ps::COMPUTE_SHADER
102 | Ps::TRANSFER
103 }
104
105 #[derive(Clone, Copy, Debug, PartialEq)]
106 enum HostMap {
107 Read,
108 Write,
109 }
110
111 #[derive(Clone, Debug, Hash, PartialEq)]
112 pub(crate) struct AttachmentData<T> {
113 pub colors: ArrayVec<[T; MAX_COLOR_TARGETS]>,
114 pub resolves: ArrayVec<[T; MAX_COLOR_TARGETS]>,
115 pub depth_stencil: Option<T>,
116 }
117 impl<T: PartialEq> Eq for AttachmentData<T> {}
118 impl<T> AttachmentData<T> {
119 pub(crate) fn all(&self) -> impl Iterator<Item = &T> {
120 self.colors
121 .iter()
122 .chain(&self.resolves)
123 .chain(&self.depth_stencil)
124 }
125 }
126
127 impl RenderPassContext {
128 // Assumes the render pass only contains one subpass.
129 pub(crate) fn compatible(&self, other: &RenderPassContext) -> bool {
130 self.colors == other.colors && self.depth_stencil == other.depth_stencil
131 }
132 }
133
134 pub(crate) type RenderPassKey = AttachmentData<hal::pass::Attachment>;
135 pub(crate) type FramebufferKey = AttachmentData<TextureViewId>;
136 pub(crate) type RenderPassContext = AttachmentData<resource::TextureFormat>;
137
138 #[derive(Debug, PartialEq)]
139 enum ResourceId {
140 Buffer(BufferId),
141 Texture(TextureId),
142 TextureView(TextureViewId),
143 BindGroup(BindGroupId),
144 Sampler(SamplerId),
145 }
146
147 #[derive(Debug)]
148 enum NativeResource<B: hal::Backend> {
149 Buffer(B::Buffer, MemoryBlock<B>),
150 Image(B::Image, MemoryBlock<B>),
151 ImageView(B::ImageView),
152 Framebuffer(B::Framebuffer),
153 DescriptorSet(DescriptorSet<B>),
154 Sampler(B::Sampler),
155 }
156
157 #[derive(Debug)]
158 struct ActiveSubmission<B: hal::Backend> {
159 index: SubmissionIndex,
160 fence: B::Fence,
161 // Note: we keep the associated ID here in order to be able to check
162 // at any point what resources are used in a submission.
163 resources: Vec<(Option<ResourceId>, NativeResource<B>)>,
164 mapped: Vec<BufferId>,
165 }
166
167 /// A struct responsible for tracking resource lifetimes.
168 ///
169 /// Here is how host mapping is handled:
170 /// 1. When mapping is requested we add the buffer to the pending list of `mapped` buffers.
171 /// 2. When `triage_mapped` is called, it checks the last submission index associated with each of the mapped buffers,
172 /// and registers the buffer either with a submission still in flight or straight into the `ready_to_map` vector.
173 /// 3. When an `ActiveSubmission` is retired, the mapped buffers associated with it are moved to the `ready_to_map` vector.
174 /// 4. Finally, `handle_mapping` issues all the callbacks.
175
176 #[derive(Debug)]
177 struct PendingResources<B: hal::Backend> {
178 /// Resources that the user has requested be mapped, but are still in use.
179 mapped: Vec<Stored<BufferId>>,
180 /// Resources that are destroyed by the user but still referenced by
181 /// other objects or command buffers.
182 referenced: Vec<(ResourceId, RefCount)>,
183 /// Resources that are not referenced any more but are still used by the GPU.
184 /// Grouped by submissions associated with a fence and a submission index.
185 /// The active submissions have to be stored in FIFO order: the oldest one comes first.
186 active: Vec<ActiveSubmission<B>>,
187 /// Resources that are neither referenced nor used, just pending
188 /// actual deletion.
189 free: Vec<NativeResource<B>>,
190 ready_to_map: Vec<BufferId>,
191 }
192
193 impl<B: GfxBackend> PendingResources<B> {
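    /// Queue a resource for destruction; it is actually freed once all external
    /// references are dropped and the GPU is done with it.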
194 fn destroy(&mut self, resource_id: ResourceId, ref_count: RefCount) {
195 debug_assert!(!self.referenced.iter().any(|r| r.0 == resource_id));
196 self.referenced.push((resource_id, ref_count));
197 }
198
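    /// Remember a buffer the user has asked to map; `triage_mapped` decides later
    /// when the mapping can actually happen.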
199 fn map(&mut self, buffer: BufferId, ref_count: RefCount) {
200 self.mapped.push(Stored {
201 value: buffer,
202 ref_count,
203 });
204 }
205
206 /// Returns the last submission index that is done.
207 fn cleanup(
208 &mut self,
209 device: &B::Device,
210 heaps_mutex: &Mutex<Heaps<B>>,
211 descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
212 force_wait: bool,
213 ) -> SubmissionIndex {
214 if force_wait && !self.active.is_empty() {
215 let status = unsafe {
216 device.wait_for_fences(
217 self.active.iter().map(|a| &a.fence),
218 hal::device::WaitFor::All,
219 CLEANUP_WAIT_MS * 1_000_000,
220 )
221 };
222 assert_eq!(status, Ok(true), "GPU got stuck :(");
223 }
224
225 //TODO: enable when `is_sorted_by_key` is stable
226 //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
227 let done_count = self
228 .active
229 .iter()
230 .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap() })
231 .unwrap_or(self.active.len());
232 let last_done = if done_count != 0 {
233 self.active[done_count - 1].index
234 } else {
235 return 0;
236 };
237
238 for a in self.active.drain(.. done_count) {
239 log::trace!("Active submission {} is done", a.index);
240 self.free.extend(a.resources.into_iter().map(|(_, r)| r));
241 self.ready_to_map.extend(a.mapped);
242 unsafe {
243 device.destroy_fence(a.fence);
244 }
245 }
246
247 let mut heaps = heaps_mutex.lock();
248 let mut descriptor_allocator = descriptor_allocator_mutex.lock();
249 for resource in self.free.drain(..) {
250 match resource {
251 NativeResource::Buffer(raw, memory) => unsafe {
252 device.destroy_buffer(raw);
253 heaps.free(device, memory);
254 },
255 NativeResource::Image(raw, memory) => unsafe {
256 device.destroy_image(raw);
257 heaps.free(device, memory);
258 },
259 NativeResource::ImageView(raw) => unsafe {
260 device.destroy_image_view(raw);
261 },
262 NativeResource::Framebuffer(raw) => unsafe {
263 device.destroy_framebuffer(raw);
264 },
265 NativeResource::DescriptorSet(raw) => unsafe {
266 descriptor_allocator.free(iter::once(raw));
267 },
268 NativeResource::Sampler(raw) => unsafe {
269 device.destroy_sampler(raw);
270 },
271 }
272 }
273
274 last_done
275 }
276
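    /// Scan the `referenced` list: for every resource whose only remaining refs are the
    /// internal ones, remove it from the registry and either attach it to the submission
    /// that still uses it or move it straight to the `free` list.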
277 fn triage_referenced(
278 &mut self,
279 global: &Global,
280 trackers: &mut TrackerSet,
281 mut token: &mut Token<Device<B>>,
282 ) {
283 // Before destruction, a resource is expected to have the following strong refs:
284 // - in the resource itself
285 // - in the device tracker
286 // - in this list
287 const MIN_REFS: usize = 4;
288
289 if self.referenced.iter().all(|r| r.1.load() >= MIN_REFS) {
290 return;
291 }
292
293 let hub = B::hub(global);
294 //TODO: lock less, if possible
295 let (mut bind_group_guard, mut token) = hub.bind_groups.write(&mut token);
296 let (mut buffer_guard, mut token) = hub.buffers.write(&mut token);
297 let (mut texture_guard, mut token) = hub.textures.write(&mut token);
298 let (mut texture_view_guard, mut token) = hub.texture_views.write(&mut token);
299 let (mut sampler_guard, _) = hub.samplers.write(&mut token);
300
301 for i in (0 .. self.referenced.len()).rev() {
302 let num_refs = self.referenced[i].1.load();
303 if num_refs <= 3 {
304 let resource_id = self.referenced.swap_remove(i).0;
305 assert_eq!(
306 num_refs, 3,
307 "Resource {:?} misses some references",
308 resource_id
309 );
310 let (life_guard, resource) = match resource_id {
311 ResourceId::Buffer(id) => {
312 if buffer_guard[id].pending_map_operation.is_some() {
313 continue;
314 }
315 trackers.buffers.remove(id);
316 let buf = buffer_guard.remove(id).unwrap();
317 #[cfg(feature = "local")]
318 hub.buffers.identity.lock().free(id);
319 (buf.life_guard, NativeResource::Buffer(buf.raw, buf.memory))
320 }
321 ResourceId::Texture(id) => {
322 trackers.textures.remove(id);
323 let tex = texture_guard.remove(id).unwrap();
324 #[cfg(feature = "local")]
325 hub.textures.identity.lock().free(id);
326 (tex.life_guard, NativeResource::Image(tex.raw, tex.memory))
327 }
328 ResourceId::TextureView(id) => {
329 trackers.views.remove(id);
330 let view = texture_view_guard.remove(id).unwrap();
331 let raw = match view.inner {
332 resource::TextureViewInner::Native { raw, .. } => raw,
333 resource::TextureViewInner::SwapChain { .. } => unreachable!(),
334 };
335 #[cfg(feature = "local")]
336 hub.texture_views.identity.lock().free(id);
337 (view.life_guard, NativeResource::ImageView(raw))
338 }
339 ResourceId::BindGroup(id) => {
340 trackers.bind_groups.remove(id);
341 let bind_group = bind_group_guard.remove(id).unwrap();
342 #[cfg(feature = "local")]
343 hub.bind_groups.identity.lock().free(id);
344 (
345 bind_group.life_guard,
346 NativeResource::DescriptorSet(bind_group.raw),
347 )
348 }
349 ResourceId::Sampler(id) => {
350 trackers.samplers.remove(id);
351 let sampler = sampler_guard.remove(id).unwrap();
352 #[cfg(feature = "local")]
353 hub.samplers.identity.lock().free(id);
354 (sampler.life_guard, NativeResource::Sampler(sampler.raw))
355 }
356 };
357
358 let submit_index = life_guard.submission_index.load(Ordering::Acquire);
359 match self.active.iter_mut().find(|a| a.index == submit_index) {
360 Some(a) => {
361 a.resources.alloc().init((Some(resource_id), resource));
362 }
363 None => self.free.push(resource),
364 }
365 }
366 }
367 }
368
369 fn triage_mapped(&mut self, global: &Global, token: &mut Token<Device<B>>) {
370 if self.mapped.is_empty() {
371 return;
372 }
373 let (buffer_guard, _) = B::hub(global).buffers.read(token);
374
375 for stored in self.mapped.drain(..) {
376 let resource_id = stored.value;
377 let buf = &buffer_guard[resource_id];
378
379 let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
380 log::trace!(
381 "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
382 resource_id,
383 submit_index,
384 self.active.iter().position(|a| a.index == submit_index)
385 );
386
387 self.active
388 .iter_mut()
389 .find(|a| a.index == submit_index)
390 .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
391 .push(resource_id);
392 }
393 }
394
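    /// Remove framebuffers whose attachments are no longer registered, deferring their
    /// deletion until the last active submission that used them has finished.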
395 fn triage_framebuffers(
396 &mut self,
397 global: &Global,
398 framebuffers: &mut FastHashMap<FramebufferKey, B::Framebuffer>,
399 token: &mut Token<Device<B>>,
400 ) {
401 let (texture_view_guard, _) = B::hub(global).texture_views.read(token);
402 let remove_list = framebuffers
403 .keys()
404 .filter_map(|key| {
405 let mut last_submit: SubmissionIndex = 0;
406 for &at in key.all() {
407 if texture_view_guard.contains(at) {
408 return None;
409 }
410 // This attachment is no longer registered.
411 // Let's see if it's used by any of the active submissions.
412 let res_id = &Some(ResourceId::TextureView(at));
413 for a in &self.active {
414 if a.resources.iter().any(|&(ref id, _)| id == res_id) {
415 last_submit = last_submit.max(a.index);
416 }
417 }
418 }
419 Some((key.clone(), last_submit))
420 })
421 .collect::<FastHashMap<_, _>>();
422
423 for (ref key, submit_index) in remove_list {
424 let resource = NativeResource::Framebuffer(framebuffers.remove(key).unwrap());
425 match self.active.iter_mut().find(|a| a.index == submit_index) {
426 Some(a) => {
427 a.resources.alloc().init((None, resource));
428 }
429 None => self.free.push(resource),
430 }
431 }
432 }
433
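    /// Map every buffer in `ready_to_map` and collect the resulting callbacks; the caller
    /// fires them once no locks are held (see `Device::fire_map_callbacks`).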
434 fn handle_mapping(
435 &mut self,
436 global: &Global,
437 raw: &B::Device,
438 token: &mut Token<Device<B>>,
439 ) -> Vec<BufferMapPendingCallback> {
440 if self.ready_to_map.is_empty() {
441 return Vec::new();
442 }
443 let (mut buffer_guard, _) = B::hub(global).buffers.write(token);
444 self.ready_to_map
445 .drain(..)
446 .map(|buffer_id| {
447 let buffer = &mut buffer_guard[buffer_id];
448 let operation = buffer.pending_map_operation.take().unwrap();
449 let result = match operation {
450 BufferMapOperation::Read(ref range, ..) => {
451 map_buffer(raw, buffer, range.clone(), HostMap::Read)
452 }
453 BufferMapOperation::Write(ref range, ..) => {
454 map_buffer(raw, buffer, range.clone(), HostMap::Write)
455 }
456 };
457 (operation, result)
458 })
459 .collect()
460 }
461 }
462
463 type BufferMapResult = Result<*mut u8, hal::device::MapError>;
464 type BufferMapPendingCallback = (BufferMapOperation, BufferMapResult);
465
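/// Map a sub-range of a buffer into host memory. For non-coherent memory, reads are
/// invalidated immediately and written ranges are recorded so they can be flushed later.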
466 fn map_buffer<B: hal::Backend>(
467 raw: &B::Device,
468 buffer: &mut resource::Buffer<B>,
469 buffer_range: Range<BufferAddress>,
470 kind: HostMap,
471 ) -> BufferMapResult {
472 let is_coherent = buffer
473 .memory
474 .properties()
475 .contains(hal::memory::Properties::COHERENT);
476 let (ptr, mapped_range) = {
477 let mapped = buffer.memory.map(raw, buffer_range)?;
478 (mapped.ptr(), mapped.range())
479 };
480
481 if !is_coherent {
482 match kind {
483 HostMap::Read => unsafe {
484 raw.invalidate_mapped_memory_ranges(iter::once((
485 buffer.memory.memory(),
486 mapped_range,
487 )))
488 .unwrap();
489 },
490 HostMap::Write => {
491 buffer.mapped_write_ranges.push(mapped_range);
492 }
493 }
494 }
495
496 Ok(ptr.as_ptr())
497 }
498
499 #[derive(Debug)]
500 pub struct Device<B: hal::Backend> {
501 pub(crate) raw: B::Device,
502 pub(crate) adapter_id: AdapterId,
503 pub(crate) queue_group: hal::queue::QueueGroup<B>,
504 pub(crate) com_allocator: command::CommandAllocator<B>,
505 mem_allocator: Mutex<Heaps<B>>,
506 desc_allocator: Mutex<DescriptorAllocator<B>>,
507 life_guard: LifeGuard,
508 pub(crate) trackers: Mutex<TrackerSet>,
509 pub(crate) render_passes: Mutex<FastHashMap<RenderPassKey, B::RenderPass>>,
510 pub(crate) framebuffers: Mutex<FastHashMap<FramebufferKey, B::Framebuffer>>,
511 pending: Mutex<PendingResources<B>>,
512 pub(crate) features: Features,
513 }
514
515 impl<B: GfxBackend> Device<B> {
516 pub(crate) fn new(
517 raw: B::Device,
518 adapter_id: AdapterId,
519 queue_group: hal::queue::QueueGroup<B>,
520 mem_props: hal::adapter::MemoryProperties,
521 supports_texture_d24_s8: bool,
522 max_bind_groups: u32,
523 ) -> Self {
524 // don't start submission index at zero
525 let life_guard = LifeGuard::new();
526 life_guard.submission_index.fetch_add(1, Ordering::Relaxed);
527
528 let heaps = {
529 let types = mem_props.memory_types.iter().map(|mt| {
530 use rendy_memory::{DynamicConfig, HeapsConfig, LinearConfig};
531 let config = HeapsConfig {
532 linear: if mt.properties.contains(hal::memory::Properties::CPU_VISIBLE) {
533 Some(LinearConfig {
534 linear_size: 0x10_00_00,
535 })
536 } else {
537 None
538 },
539 dynamic: Some(DynamicConfig {
540 block_size_granularity: 0x1_00,
541 max_chunk_size: 0x1_00_00_00,
542 min_device_allocation: 0x1_00_00,
543 }),
544 };
545 (mt.properties.into(), mt.heap_index as u32, config)
546 });
547 unsafe { Heaps::new(types, mem_props.memory_heaps.iter().cloned()) }
548 };
549
550 Device {
551 raw,
552 adapter_id,
553 com_allocator: command::CommandAllocator::new(queue_group.family),
554 mem_allocator: Mutex::new(heaps),
555 desc_allocator: Mutex::new(DescriptorAllocator::new()),
556 queue_group,
557 life_guard,
558 trackers: Mutex::new(TrackerSet::new(B::VARIANT)),
559 render_passes: Mutex::new(FastHashMap::default()),
560 framebuffers: Mutex::new(FastHashMap::default()),
561 pending: Mutex::new(PendingResources {
562 mapped: Vec::new(),
563 referenced: Vec::new(),
564 active: Vec::new(),
565 free: Vec::new(),
566 ready_to_map: Vec::new(),
567 }),
568 features: Features {
569 max_bind_groups,
570 supports_texture_d24_s8,
571 },
572 }
573 }
574
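    /// Triage pending resources and mappings, destroy whatever has become free, and return
    /// the buffer-mapping callbacks that the caller must fire after releasing all locks.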
575 fn maintain(
576 &self,
577 global: &Global,
578 force_wait: bool,
579 token: &mut Token<Self>,
580 ) -> Vec<BufferMapPendingCallback> {
581 let mut pending = self.pending.lock();
582 let mut trackers = self.trackers.lock();
583
584 pending.triage_referenced(global, &mut *trackers, token);
585 pending.triage_mapped(global, token);
586 pending.triage_framebuffers(global, &mut *self.framebuffers.lock(), token);
587 let last_done = pending.cleanup(
588 &self.raw,
589 &self.mem_allocator,
590 &self.desc_allocator,
591 force_wait,
592 );
593 let callbacks = pending.handle_mapping(global, &self.raw, token);
594
595 unsafe {
596 self.desc_allocator.lock().cleanup(&self.raw);
597 }
598
599 if last_done != 0 {
600 self.com_allocator.maintain(last_done);
601 }
602
603 callbacks
604 }
605
606 // Note: this logic is specifically moved out of `handle_mapping()` in order to
607 // have nothing locked by the time we execute the user's callback code.
608 fn fire_map_callbacks<I: IntoIterator<Item = BufferMapPendingCallback>>(callbacks: I) {
609 for (operation, result) in callbacks {
610 let (status, ptr) = match result {
611 Ok(ptr) => (BufferMapAsyncStatus::Success, ptr),
612 Err(e) => {
613 log::error!("failed to map buffer: {:?}", e);
614 (BufferMapAsyncStatus::Error, ptr::null_mut())
615 }
616 };
617 match operation {
618 BufferMapOperation::Read(_, on_read, userdata) => on_read(status, ptr, userdata),
619 BufferMapOperation::Write(_, on_write, userdata) => on_write(status, ptr, userdata),
620 }
621 }
622 }
623
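    /// Create a buffer, picking a rendy-memory usage profile from the requested
    /// `BufferUsage` and binding freshly allocated memory to it.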
624 fn create_buffer(
625 &self,
626 self_id: DeviceId,
627 desc: &resource::BufferDescriptor,
628 ) -> resource::Buffer<B> {
629 debug_assert_eq!(self_id.backend(), B::VARIANT);
630 let (usage, _memory_properties) = conv::map_buffer_usage(desc.usage);
631
632 let rendy_usage = {
633 use rendy_memory::MemoryUsageValue as Muv;
634 use resource::BufferUsage as Bu;
635
636 if !desc.usage.intersects(Bu::MAP_READ | Bu::MAP_WRITE) {
637 Muv::Data
638 } else if (Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage) {
639 Muv::Upload
640 } else if (Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage) {
641 Muv::Download
642 } else {
643 Muv::Dynamic
644 }
645 };
646
647 let mut buffer = unsafe { self.raw.create_buffer(desc.size, usage).unwrap() };
648 let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
649 let memory = self
650 .mem_allocator
651 .lock()
652 .allocate(
653 &self.raw,
654 requirements.type_mask as u32,
655 rendy_usage,
656 requirements.size,
657 requirements.alignment,
658 )
659 .unwrap();
660
661 unsafe {
662 self.raw
663 .bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer)
664 .unwrap()
665 };
666
667 resource::Buffer {
668 raw: buffer,
669 device_id: Stored {
670 value: self_id,
671 ref_count: self.life_guard.ref_count.clone(),
672 },
673 usage: desc.usage,
674 memory,
675 size: desc.size,
676 mapped_write_ranges: Vec::new(),
677 pending_map_operation: None,
678 life_guard: LifeGuard::new(),
679 }
680 }
681
682 fn create_texture(
683 &self,
684 self_id: DeviceId,
685 desc: &resource::TextureDescriptor,
686 ) -> resource::Texture<B> {
687 debug_assert_eq!(self_id.backend(), B::VARIANT);
688
689 // Ensure `D24Plus` textures cannot be copied
690 match desc.format {
691 TextureFormat::Depth24Plus | TextureFormat::Depth24PlusStencil8 => {
692 assert!(!desc
693 .usage
694 .intersects(TextureUsage::COPY_SRC | TextureUsage::COPY_DST));
695 }
696 _ => {}
697 }
698
699 let kind = conv::map_texture_dimension_size(
700 desc.dimension,
701 desc.size,
702 desc.array_layer_count,
703 desc.sample_count,
704 );
705 let format = conv::map_texture_format(desc.format, self.features);
706 let aspects = format.surface_desc().aspects;
707 let usage = conv::map_texture_usage(desc.usage, aspects);
708
709 assert!((desc.mip_level_count as usize) < MAX_MIP_LEVELS);
710 let mut view_capabilities = hal::image::ViewCapabilities::empty();
711
712 // 2D textures with array layer counts that are multiples of 6 could be cubemaps.
713 // Following gpuweb/gpuweb#68, always add the hint in that case.
714 if desc.dimension == TextureDimension::D2 && desc.array_layer_count % 6 == 0 {
715 view_capabilities |= hal::image::ViewCapabilities::KIND_CUBE;
716 };
717
718 // TODO: 2D arrays, cubemap arrays
719
720 let mut image = unsafe {
721 self.raw.create_image(
722 kind,
723 desc.mip_level_count as hal::image::Level,
724 format,
725 hal::image::Tiling::Optimal,
726 usage,
727 view_capabilities,
728 )
729 }
730 .unwrap();
731 let requirements = unsafe { self.raw.get_image_requirements(&image) };
732
733 let memory = self
734 .mem_allocator
735 .lock()
736 .allocate(
737 &self.raw,
738 requirements.type_mask as u32,
739 rendy_memory::Data,
740 requirements.size,
741 requirements.alignment,
742 )
743 .unwrap();
744
745 unsafe {
746 self.raw
747 .bind_image_memory(memory.memory(), memory.range().start, &mut image)
748 .unwrap()
749 };
750
751 resource::Texture {
752 raw: image,
753 device_id: Stored {
754 value: self_id,
755 ref_count: self.life_guard.ref_count.clone(),
756 },
757 usage: desc.usage,
758 kind,
759 format: desc.format,
760 full_range: hal::image::SubresourceRange {
761 aspects,
762 levels: 0 .. desc.mip_level_count as hal::image::Level,
763 layers: 0 .. desc.array_layer_count as hal::image::Layer,
764 },
765 memory,
766 life_guard: LifeGuard::new(),
767 }
768 }
769 }
770
771 impl<B: hal::Backend> Device<B> {
772 pub(crate) fn destroy_bind_group(&self, bind_group: binding_model::BindGroup<B>) {
773 unsafe {
774 self.desc_allocator.lock().free(iter::once(bind_group.raw));
775 }
776 }
777
778 pub(crate) fn dispose(self) {
779 self.com_allocator.destroy(&self.raw);
780 let desc_alloc = self.desc_allocator.into_inner();
781 unsafe {
782 desc_alloc.dispose(&self.raw);
783 }
784 }
785 }
786
787 #[cfg(feature = "local")]
788 #[no_mangle]
789 pub extern "C" fn wgpu_device_get_limits(_device_id: DeviceId, limits: &mut Limits) {
790 *limits = Limits::default(); // TODO
791 }
792
793 #[derive(Debug)]
794 pub struct ShaderModule<B: hal::Backend> {
795 pub(crate) raw: B::ShaderModule,
796 }
797
798 pub fn device_create_buffer<B: GfxBackend>(
799 global: &Global,
800 device_id: DeviceId,
801 desc: &resource::BufferDescriptor,
802 id_in: Input<BufferId>,
803 ) -> Output<BufferId> {
804 let hub = B::hub(global);
805 let mut token = Token::root();
806
807 let (device_guard, _) = hub.devices.read(&mut token);
808 let device = &device_guard[device_id];
809 let buffer = device.create_buffer(device_id, desc);
810
811 let (id, id_out) = hub.buffers.new_identity(id_in);
812 let ok = device.trackers.lock().buffers.init(
813 id,
814 &buffer.life_guard.ref_count,
815 (),
816 resource::BufferUsage::empty(),
817 );
818 assert!(ok);
819
820 hub.buffers.register(id, buffer, &mut token);
821 id_out
822 }
823
824 #[cfg(feature = "local")]
825 #[no_mangle]
826 pub extern "C" fn wgpu_device_create_buffer(
827 device_id: DeviceId,
828 desc: &resource::BufferDescriptor,
829 ) -> BufferId {
830 gfx_select!(device_id => device_create_buffer(&*GLOBAL, device_id, desc, PhantomData))
831 }
832
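/// Like `device_create_buffer`, but forces `MAP_WRITE` usage and returns the mapped
/// pointer through `mapped_ptr_out` (set to null if mapping fails).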
833 pub fn device_create_buffer_mapped<B: GfxBackend>(
834 global: &Global,
835 device_id: DeviceId,
836 desc: &resource::BufferDescriptor,
837 mapped_ptr_out: *mut *mut u8,
838 id_in: Input<BufferId>,
839 ) -> Output<BufferId> {
840 let hub = B::hub(global);
841 let mut token = Token::root();
842 let mut desc = desc.clone();
843 desc.usage |= resource::BufferUsage::MAP_WRITE;
844
845 let (device_guard, _) = hub.devices.read(&mut token);
846 let device = &device_guard[device_id];
847 let mut buffer = device.create_buffer(device_id, &desc);
848
849 match map_buffer(&device.raw, &mut buffer, 0 .. desc.size, HostMap::Write) {
850 Ok(ptr) => unsafe {
851 *mapped_ptr_out = ptr;
852 },
853 Err(e) => {
854 log::error!("failed to create buffer in a mapped state: {:?}", e);
855 unsafe {
856 *mapped_ptr_out = ptr::null_mut();
857 }
858 }
859 }
860
861 let (id, id_out) = hub.buffers.new_identity(id_in);
862 let ok = device.trackers.lock().buffers.init(
863 id,
864 &buffer.life_guard.ref_count,
865 (),
866 resource::BufferUsage::MAP_WRITE,
867 );
868 assert!(ok);
869
870 hub.buffers.register(id, buffer, &mut token);
871 id_out
872 }
873
874 #[cfg(feature = "local")]
875 #[no_mangle]
876 pub extern "C" fn wgpu_device_create_buffer_mapped(
877 device_id: DeviceId,
878 desc: &resource::BufferDescriptor,
879 mapped_ptr_out: *mut *mut u8,
880 ) -> BufferId {
881 gfx_select!(device_id => device_create_buffer_mapped(&*GLOBAL, device_id, desc, mapped_ptr_out, PhantomData))
882 }
883
884 pub fn buffer_destroy<B: GfxBackend>(global: &Global, buffer_id: BufferId) {
885 let hub = B::hub(global);
886 let mut token = Token::root();
887 let (device_guard, mut token) = hub.devices.read(&mut token);
888 let (buffer_guard, _) = hub.buffers.read(&mut token);
889 let buffer = &buffer_guard[buffer_id];
890 device_guard[buffer.device_id.value].pending.lock().destroy(
891 ResourceId::Buffer(buffer_id),
892 buffer.life_guard.ref_count.clone(),
893 );
894 }
895
896 #[cfg(feature = "local")]
897 #[no_mangle]
898 pub extern "C" fn wgpu_buffer_destroy(buffer_id: BufferId) {
899 gfx_select!(buffer_id => buffer_destroy(&*GLOBAL, buffer_id))
900 }
901
902 pub fn device_create_texture<B: GfxBackend>(
903 global: &Global,
904 device_id: DeviceId,
905 desc: &resource::TextureDescriptor,
906 id_in: Input<TextureId>,
907 ) -> Output<TextureId> {
908 let hub = B::hub(global);
909 let mut token = Token::root();
910
911 let (device_guard, _) = hub.devices.read(&mut token);
912 let device = &device_guard[device_id];
913 let texture = device.create_texture(device_id, desc);
914
915 let (id, id_out) = hub.textures.new_identity(id_in);
916 let ok = device.trackers.lock().textures.init(
917 id,
918 &texture.life_guard.ref_count,
919 texture.full_range.clone(),
920 resource::TextureUsage::UNINITIALIZED,
921 );
922 assert!(ok);
923
924 hub.textures.register(id, texture, &mut token);
925 id_out
926 }
927
928 #[cfg(feature = "local")]
929 #[no_mangle]
930 pub extern "C" fn wgpu_device_create_texture(
931 device_id: DeviceId,
932 desc: &resource::TextureDescriptor,
933 ) -> TextureId {
934 gfx_select!(device_id => device_create_texture(&*GLOBAL, device_id, desc, PhantomData))
935 }
936
937 pub fn texture_create_view<B: GfxBackend>(
938 global: &Global,
939 texture_id: TextureId,
940 desc: Option<&resource::TextureViewDescriptor>,
941 id_in: Input<TextureViewId>,
942 ) -> Output<TextureViewId> {
943 let hub = B::hub(global);
944 let mut token = Token::root();
945
946 let (device_guard, mut token) = hub.devices.read(&mut token);
947 let (texture_guard, mut token) = hub.textures.read(&mut token);
948 let texture = &texture_guard[texture_id];
949 let device = &device_guard[texture.device_id.value];
950
951 let (format, view_kind, range) = match desc {
952 Some(desc) => {
953 let kind = conv::map_texture_view_dimension(desc.dimension);
954 let end_level = if desc.level_count == 0 {
955 texture.full_range.levels.end
956 } else {
957 (desc.base_mip_level + desc.level_count) as u8
958 };
959 let end_layer = if desc.array_layer_count == 0 {
960 texture.full_range.layers.end
961 } else {
962 (desc.base_array_layer + desc.array_layer_count) as u16
963 };
964 let range = hal::image::SubresourceRange {
965 aspects: match desc.aspect {
966 resource::TextureAspect::All => texture.full_range.aspects,
967 resource::TextureAspect::DepthOnly => hal::format::Aspects::DEPTH,
968 resource::TextureAspect::StencilOnly => hal::format::Aspects::STENCIL,
969 },
970 levels: desc.base_mip_level as u8 .. end_level,
971 layers: desc.base_array_layer as u16 .. end_layer,
972 };
973 (desc.format, kind, range)
974 }
975 None => {
976 let kind = match texture.kind {
977 hal::image::Kind::D1(_, 1) => hal::image::ViewKind::D1,
978 hal::image::Kind::D1(..) => hal::image::ViewKind::D1Array,
979 hal::image::Kind::D2(_, _, 1, _) => hal::image::ViewKind::D2,
980 hal::image::Kind::D2(..) => hal::image::ViewKind::D2Array,
981 hal::image::Kind::D3(..) => hal::image::ViewKind::D3,
982 };
983 (texture.format, kind, texture.full_range.clone())
984 }
985 };
986
987 let raw = unsafe {
988 device
989 .raw
990 .create_image_view(
991 &texture.raw,
992 view_kind,
993 conv::map_texture_format(format, device.features),
994 hal::format::Swizzle::NO,
995 range.clone(),
996 )
997 .unwrap()
998 };
999
1000 let view = resource::TextureView {
1001 inner: resource::TextureViewInner::Native {
1002 raw,
1003 source_id: Stored {
1004 value: texture_id,
1005 ref_count: texture.life_guard.ref_count.clone(),
1006 },
1007 },
1008 format: texture.format,
1009 extent: texture.kind.extent().at_level(range.levels.start),
1010 samples: texture.kind.num_samples(),
1011 range,
1012 life_guard: LifeGuard::new(),
1013 };
1014
1015 let (id, id_out) = hub.texture_views.new_identity(id_in);
1016 let ok = device
1017 .trackers
1018 .lock()
1019 .views
1020 .init(id, &view.life_guard.ref_count, (), ());
1021 assert!(ok);
1022
1023 hub.texture_views.register(id, view, &mut token);
1024 id_out
1025 }
1026
1027 #[cfg(feature = "local")]
1028 #[no_mangle]
1029 pub extern "C" fn wgpu_texture_create_view(
1030 texture_id: TextureId,
1031 desc: Option<&resource::TextureViewDescriptor>,
1032 ) -> TextureViewId {
1033 gfx_select!(texture_id => texture_create_view(&*GLOBAL, texture_id, desc, PhantomData))
1034 }
1035
1036 pub fn texture_destroy<B: GfxBackend>(global: &Global, texture_id: TextureId) {
1037 let hub = B::hub(global);
1038 let mut token = Token::root();
1039
1040 let (device_guard, mut token) = hub.devices.read(&mut token);
1041 let (texture_guard, _) = hub.textures.read(&mut token);
1042 let texture = &texture_guard[texture_id];
1043 device_guard[texture.device_id.value]
1044 .pending
1045 .lock()
1046 .destroy(
1047 ResourceId::Texture(texture_id),
1048 texture.life_guard.ref_count.clone(),
1049 );
1050 }
1051
1052 #[cfg(feature = "local")]
1053 #[no_mangle]
1054 pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
1055 gfx_select!(texture_id => texture_destroy(&*GLOBAL, texture_id))
1056 }
1057
1058 pub fn texture_view_destroy<B: GfxBackend>(global: &Global, texture_view_id: TextureViewId) {
1059 let hub = B::hub(global);
1060 let mut token = Token::root();
1061 let (device_guard, mut token) = hub.devices.read(&mut token);
1062 let (texture_guard, mut token) = hub.textures.read(&mut token);
1063 let (texture_view_guard, _) = hub.texture_views.read(&mut token);
1064 let view = &texture_view_guard[texture_view_id];
1065 let device_id = match view.inner {
1066 resource::TextureViewInner::Native { ref source_id, .. } => {
1067 texture_guard[source_id.value].device_id.value
1068 }
1069 resource::TextureViewInner::SwapChain { .. } => panic!("Can't destroy a swap chain image"),
1070 };
1071 device_guard[device_id].pending.lock().destroy(
1072 ResourceId::TextureView(texture_view_id),
1073 view.life_guard.ref_count.clone(),
1074 );
1075 }
1076
1077 #[cfg(feature = "local")]
1078 #[no_mangle]
1079 pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) {
1080 gfx_select!(texture_view_id => texture_view_destroy(&*GLOBAL, texture_view_id))
1081 }
1082
1083 pub fn device_create_sampler<B: GfxBackend>(
1084 global: &Global,
1085 device_id: DeviceId,
1086 desc: &resource::SamplerDescriptor,
1087 id_in: Input<SamplerId>,
1088 ) -> Output<SamplerId> {
1089 let hub = B::hub(global);
1090 let mut token = Token::root();
1091 let (device_guard, mut token) = hub.devices.read(&mut token);
1092 let device = &device_guard[device_id];
1093
1094 let info = hal::image::SamplerDesc {
1095 min_filter: conv::map_filter(desc.min_filter),
1096 mag_filter: conv::map_filter(desc.mag_filter),
1097 mip_filter: conv::map_filter(desc.mipmap_filter),
1098 wrap_mode: (
1099 conv::map_wrap(desc.address_mode_u),
1100 conv::map_wrap(desc.address_mode_v),
1101 conv::map_wrap(desc.address_mode_w),
1102 ),
1103 lod_bias: hal::image::Lod(0.0),
1104 lod_range: hal::image::Lod(desc.lod_min_clamp) .. hal::image::Lod(desc.lod_max_clamp),
1105 comparison: if desc.compare_function == resource::CompareFunction::Always {
1106 None
1107 } else {
1108 Some(conv::map_compare_function(desc.compare_function))
1109 },
1110 border: hal::image::PackedColor(0),
1111 normalized: true,
1112 anisotropic: hal::image::Anisotropic::Off, //TODO
1113 };
1114
1115 let sampler = resource::Sampler {
1116 raw: unsafe { device.raw.create_sampler(&info).unwrap() },
1117 device_id: Stored {
1118 value: device_id,
1119 ref_count: device.life_guard.ref_count.clone(),
1120 },
1121 life_guard: LifeGuard::new(),
1122 };
1123 hub.samplers.register_identity(id_in, sampler, &mut token)
1124 }
1125
1126 #[cfg(feature = "local")]
1127 #[no_mangle]
1128 pub extern "C" fn wgpu_device_create_sampler(
1129 device_id: DeviceId,
1130 desc: &resource::SamplerDescriptor,
1131 ) -> SamplerId {
1132 gfx_select!(device_id => device_create_sampler(&*GLOBAL, device_id, desc, PhantomData))
1133 }
1134
1135 pub fn sampler_destroy<B: GfxBackend>(global: &Global, sampler_id: SamplerId) {
1136 let hub = B::hub(global);
1137 let mut token = Token::root();
1138 let (device_guard, mut token) = hub.devices.read(&mut token);
1139 let (sampler_guard, _) = hub.samplers.read(&mut token);
1140 let sampler = &sampler_guard[sampler_id];
1141 device_guard[sampler.device_id.value]
1142 .pending
1143 .lock()
1144 .destroy(
1145 ResourceId::Sampler(sampler_id),
1146 sampler.life_guard.ref_count.clone(),
1147 );
1148 }
1149
1150 #[cfg(feature = "local")]
1151 #[no_mangle]
1152 pub extern "C" fn wgpu_sampler_destroy(sampler_id: SamplerId) {
1153 gfx_select!(sampler_id => sampler_destroy(&*GLOBAL, sampler_id))
1154 }
1155
1156 pub fn device_create_bind_group_layout<B: GfxBackend>(
1157 global: &Global,
1158 device_id: DeviceId,
1159 desc: &binding_model::BindGroupLayoutDescriptor,
1160 id_in: Input<BindGroupLayoutId>,
1161 ) -> Output<BindGroupLayoutId> {
1162 let mut token = Token::root();
1163 let hub = B::hub(global);
1164 let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) };
1165
1166 let raw_bindings = bindings
1167 .iter()
1168 .map(|binding| hal::pso::DescriptorSetLayoutBinding {
1169 binding: binding.binding,
1170 ty: conv::map_binding_type(binding),
1171 count: 1, //TODO: consolidate
1172 stage_flags: conv::map_shader_stage_flags(binding.visibility),
1173 immutable_samplers: false, // TODO
1174 })
1175 .collect::<Vec<_>>(); //TODO: avoid heap allocation
1176
1177 let raw = unsafe {
1178 let (device_guard, _) = hub.devices.read(&mut token);
1179 device_guard[device_id]
1180 .raw
1181 .create_descriptor_set_layout(&raw_bindings, &[])
1182 .unwrap()
1183 };
1184
1185 let layout = binding_model::BindGroupLayout {
1186 raw,
1187 bindings: bindings.to_vec(),
1188 desc_ranges: DescriptorRanges::from_bindings(&raw_bindings),
1189 dynamic_count: bindings.iter().filter(|b| b.dynamic).count(),
1190 };
1191
1192 hub.bind_group_layouts
1193 .register_identity(id_in, layout, &mut token)
1194 }
1195
1196 #[cfg(feature = "local")]
1197 #[no_mangle]
1198 pub extern "C" fn wgpu_device_create_bind_group_layout(
1199 device_id: DeviceId,
1200 desc: &binding_model::BindGroupLayoutDescriptor,
1201 ) -> BindGroupLayoutId {
1202 gfx_select!(device_id => device_create_bind_group_layout(&*GLOBAL, device_id, desc, PhantomData))
1203 }
1204
1205 pub fn device_create_pipeline_layout<B: GfxBackend>(
1206 global: &Global,
1207 device_id: DeviceId,
1208 desc: &binding_model::PipelineLayoutDescriptor,
1209 id_in: Input<PipelineLayoutId>,
1210 ) -> Output<PipelineLayoutId> {
1211 let hub = B::hub(global);
1212 let mut token = Token::root();
1213
1214 let (device_guard, mut token) = hub.devices.read(&mut token);
1215 let device = &device_guard[device_id];
1216 let bind_group_layout_ids =
1217 unsafe { slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length) };
1218
1219 assert!(desc.bind_group_layouts_length <= (device.features.max_bind_groups as usize),
1220 "Cannot set a bind group which is beyond the `max_bind_groups` limit requested on device creation");
1221
1222 // TODO: push constants
1223 let pipeline_layout = {
1224 let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token);
1225 let descriptor_set_layouts = bind_group_layout_ids
1226 .iter()
1227 .map(|&id| &bind_group_layout_guard[id].raw);
1228 unsafe {
1229 device.raw.create_pipeline_layout(descriptor_set_layouts, &[])
1230 }
1231 .unwrap()
1232 };
1233
1234 let layout = binding_model::PipelineLayout {
1235 raw: pipeline_layout,
1236 bind_group_layout_ids: bind_group_layout_ids.iter().cloned().collect(),
1237 };
1238 hub.pipeline_layouts
1239 .register_identity(id_in, layout, &mut token)
1240 }
1241
1242 #[cfg(feature = "local")]
1243 #[no_mangle]
1244 pub extern "C" fn wgpu_device_create_pipeline_layout(
1245 device_id: DeviceId,
1246 desc: &binding_model::PipelineLayoutDescriptor,
1247 ) -> PipelineLayoutId {
1248 gfx_select!(device_id => device_create_pipeline_layout(&*GLOBAL, device_id, desc, PhantomData))
1249 }
1250
1251 pub fn device_create_bind_group<B: GfxBackend>(
1252 global: &Global,
1253 device_id: DeviceId,
1254 desc: &binding_model::BindGroupDescriptor,
1255 id_in: Input<BindGroupId>,
1256 ) -> Output<BindGroupId> {
1257 let hub = B::hub(global);
1258 let mut token = Token::root();
1259
1260 let (device_guard, mut token) = hub.devices.read(&mut token);
1261 let device = &device_guard[device_id];
1262 let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token);
1263 let bind_group_layout = &bind_group_layout_guard[desc.layout];
1264 let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length as usize) };
1265 assert_eq!(bindings.len(), bind_group_layout.bindings.len());
1266
1267 let desc_set = unsafe {
1268 let mut desc_sets = ArrayVec::<[_; 1]>::new();
1269 device
1270 .desc_allocator
1271 .lock()
1272 .allocate(
1273 &device.raw,
1274 &bind_group_layout.raw,
1275 bind_group_layout.desc_ranges,
1276 1,
1277 &mut desc_sets,
1278 )
1279 .unwrap();
1280 desc_sets.pop().unwrap()
1281 };
1282
1283 // fill out the descriptors
1284 let mut used = TrackerSet::new(B::VARIANT);
1285 {
1286 let (buffer_guard, mut token) = hub.buffers.read(&mut token);
1287 let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
1288 let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
1289 let (sampler_guard, _) = hub.samplers.read(&mut token);
1290
1291 //TODO: group writes into contiguous sections
1292 let mut writes = Vec::new();
1293 for (b, decl) in bindings.iter().zip(&bind_group_layout.bindings) {
1294 let descriptor = match b.resource {
1295 binding_model::BindingResource::Buffer(ref bb) => {
1296 let (alignment, usage) = match decl.ty {
1297 binding_model::BindingType::UniformBuffer => {
1298 (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::UNIFORM)
1299 }
1300 binding_model::BindingType::StorageBuffer => {
1301 (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::STORAGE)
1302 }
1303 binding_model::BindingType::ReadonlyStorageBuffer => {
1304 (BIND_BUFFER_ALIGNMENT, resource::BufferUsage::STORAGE_READ)
1305 }
1306 binding_model::BindingType::Sampler
1307 | binding_model::BindingType::SampledTexture
1308 | binding_model::BindingType::StorageTexture => {
1309 panic!("Mismatched buffer binding for {:?}", decl)
1310 }
1311 };
1312 assert_eq!(
1313 bb.offset as hal::buffer::Offset % alignment,
1314 0,
1315 "Misaligned buffer offset {}",
1316 bb.offset
1317 );
1318 let buffer = used
1319 .buffers
1320 .use_extend(&*buffer_guard, bb.buffer, (), usage)
1321 .unwrap();
1322 assert!(
1323 buffer.usage.contains(usage),
1324 "Expected buffer usage {:?}",
1325 usage
1326 );
1327
1328 let end = if bb.size == 0 {
1329 None
1330 } else {
1331 let end = bb.offset + bb.size;
1332 assert!(
1333 end <= buffer.size,
1334 "Bound buffer range {:?} does not fit in buffer size {}",
1335 bb.offset .. end,
1336 buffer.size
1337 );
1338 Some(end)
1339 };
1340
1341 let range = Some(bb.offset) .. end;
1342 hal::pso::Descriptor::Buffer(&buffer.raw, range)
1343 }
1344 binding_model::BindingResource::Sampler(id) => {
1345 assert_eq!(decl.ty, binding_model::BindingType::Sampler);
1346 let sampler = used
1347 .samplers
1348 .use_extend(&*sampler_guard, id, (), ())
1349 .unwrap();
1350 hal::pso::Descriptor::Sampler(&sampler.raw)
1351 }
1352 binding_model::BindingResource::TextureView(id) => {
1353 let (usage, image_layout) = match decl.ty {
1354 binding_model::BindingType::SampledTexture => (
1355 resource::TextureUsage::SAMPLED,
1356 hal::image::Layout::ShaderReadOnlyOptimal,
1357 ),
1358 binding_model::BindingType::StorageTexture => {
1359 (resource::TextureUsage::STORAGE, hal::image::Layout::General)
1360 }
1361 _ => panic!("Mismatched texture binding for {:?}", decl),
1362 };
1363 let view = used
1364 .views
1365 .use_extend(&*texture_view_guard, id, (), ())
1366 .unwrap();
1367 match view.inner {
1368 resource::TextureViewInner::Native {
1369 ref raw,
1370 ref source_id,
1371 } => {
1372 let texture = used
1373 .textures
1374 .use_extend(
1375 &*texture_guard,
1376 source_id.value,
1377 view.range.clone(),
1378 usage,
1379 )
1380 .unwrap();
1381 assert!(texture.usage.contains(usage));
1382
1383 hal::pso::Descriptor::Image(raw, image_layout)
1384 }
1385 resource::TextureViewInner::SwapChain { .. } => {
1386 panic!("Unable to create a bind group with a swap chain image")
1387 }
1388 }
1389 }
1390 };
1391 writes.alloc().init(hal::pso::DescriptorSetWrite {
1392 set: desc_set.raw(),
1393 binding: b.binding,
1394 array_offset: 0, //TODO
1395 descriptors: iter::once(descriptor),
1396 });
1397 }
1398
1399 unsafe {
1400 device.raw.write_descriptor_sets(writes);
1401 }
1402 }
1403
1404 let bind_group = binding_model::BindGroup {
1405 raw: desc_set,
1406 device_id: Stored {
1407 value: device_id,
1408 ref_count: device.life_guard.ref_count.clone(),
1409 },
1410 layout_id: desc.layout,
1411 life_guard: LifeGuard::new(),
1412 used,
1413 dynamic_count: bind_group_layout.dynamic_count,
1414 };
1415 let (id, id_out) = hub.bind_groups.new_identity(id_in);
1416 let ok = device
1417 .trackers
1418 .lock()
1419 .bind_groups
1420 .init(id, &bind_group.life_guard.ref_count, (), ());
1421 assert!(ok);
1422
1423 hub.bind_groups.register(id, bind_group, &mut token);
1424 id_out
1425 }
1426
1427 #[cfg(feature = "local")]
1428 #[no_mangle]
1429 pub extern "C" fn wgpu_device_create_bind_group(
1430 device_id: DeviceId,
1431 desc: &binding_model::BindGroupDescriptor,
1432 ) -> BindGroupId {
1433 gfx_select!(device_id => device_create_bind_group(&*GLOBAL, device_id, desc, PhantomData))
1434 }
1435
1436 pub fn bind_group_destroy<B: GfxBackend>(global: &Global, bind_group_id: BindGroupId) {
1437 let hub = B::hub(global);
1438 let mut token = Token::root();
1439 let (device_guard, mut token) = hub.devices.read(&mut token);
1440 let (bind_group_guard, _) = hub.bind_groups.read(&mut token);
1441 let bind_group = &bind_group_guard[bind_group_id];
1442 device_guard[bind_group.device_id.value]
1443 .pending
1444 .lock()
1445 .destroy(
1446 ResourceId::BindGroup(bind_group_id),
1447 bind_group.life_guard.ref_count.clone(),
1448 );
1449 }
1450
1451 #[cfg(feature = "local")]
1452 #[no_mangle]
1453 pub extern "C" fn wgpu_bind_group_destroy(bind_group_id: BindGroupId) {
1454 gfx_select!(bind_group_id => bind_group_destroy(&*GLOBAL, bind_group_id))
1455 }
1456
1457 pub fn device_create_shader_module<B: GfxBackend>(
1458 global: &Global,
1459 device_id: DeviceId,
1460 desc: &pipeline::ShaderModuleDescriptor,
1461 id_in: Input<ShaderModuleId>,
1462 ) -> Output<ShaderModuleId> {
1463 let hub = B::hub(global);
1464 let mut token = Token::root();
1465
1466 let spv = unsafe { slice::from_raw_parts(desc.code.bytes, desc.code.length) };
1467 let shader = {
1468 let (device_guard, _) = hub.devices.read(&mut token);
1469 ShaderModule {
1470 raw: unsafe {
1471 device_guard[device_id]
1472 .raw
1473 .create_shader_module(spv)
1474 .unwrap()
1475 },
1476 }
1477 };
1478 hub.shader_modules
1479 .register_identity(id_in, shader, &mut token)
1480 }
1481
1482 #[cfg(feature = "local")]
1483 #[no_mangle]
1484 pub extern "C" fn wgpu_device_create_shader_module(
1485 device_id: DeviceId,
1486 desc: &pipeline::ShaderModuleDescriptor,
1487 ) -> ShaderModuleId {
1488 gfx_select!(device_id => device_create_shader_module(&*GLOBAL, device_id, desc, PhantomData))
1489 }
1490
1491 pub fn device_create_command_encoder<B: GfxBackend>(
1492 global: &Global,
1493 device_id: DeviceId,
1494 _desc: &command::CommandEncoderDescriptor,
1495 id_in: Input<CommandEncoderId>,
1496 ) -> Output<CommandEncoderId> {
1497 let hub = B::hub(global);
1498 let mut token = Token::root();
1499
1500 let (device_guard, mut token) = hub.devices.read(&mut token);
1501 let device = &device_guard[device_id];
1502
1503 let dev_stored = Stored {
1504 value: device_id,
1505 ref_count: device.life_guard.ref_count.clone(),
1506 };
1507 let mut comb = device
1508 .com_allocator
1509 .allocate(dev_stored, &device.raw, device.features);
1510 unsafe {
1511 comb.raw.last_mut().unwrap().begin(
1512 hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
1513 hal::command::CommandBufferInheritanceInfo::default(),
1514 );
1515 }
1516
1517 hub.command_buffers
1518 .register_identity(id_in, comb, &mut token)
1519 }
1520
1521 #[cfg(feature = "local")]
1522 #[no_mangle]
1523 pub extern "C" fn wgpu_device_create_command_encoder(
1524 device_id: DeviceId,
1525 desc: Option<&command::CommandEncoderDescriptor>,
1526 ) -> CommandEncoderId {
1527 let desc = &desc.cloned().unwrap_or_default();
1528 gfx_select!(device_id => device_create_command_encoder(&*GLOBAL, device_id, desc, PhantomData))
1529 }
1530
1531 #[cfg(feature = "local")]
1532 #[no_mangle]
1533 pub extern "C" fn wgpu_device_get_queue(device_id: DeviceId) -> QueueId {
1534 device_id
1535 }
1536
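/// Submit command buffers to the queue: stamp every used resource with the new submission
/// index, prepend a transition command buffer for the required resource barriers, and
/// submit with a fresh fence that `Device::maintain` will later poll.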
1537 pub fn queue_submit<B: GfxBackend>(
1538 global: &Global,
1539 queue_id: QueueId,
1540 command_buffer_ids: &[CommandBufferId],
1541 ) {
1542 let hub = B::hub(global);
1543
1544 let (submit_index, fence) = {
1545 let mut token = Token::root();
1546 let (mut device_guard, mut token) = hub.devices.write(&mut token);
1547 let (swap_chain_guard, mut token) = hub.swap_chains.read(&mut token);
1548 let device = &mut device_guard[queue_id];
1549
1550 let mut trackers = device.trackers.lock();
1551 let mut signal_semaphores = Vec::new();
1552
1553 let submit_index = 1 + device
1554 .life_guard
1555 .submission_index
1556 .fetch_add(1, Ordering::Relaxed);
1557
1558 let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
1559 let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
1560 let (buffer_guard, mut token) = hub.buffers.read(&mut token);
1561 let (texture_guard, mut token) = hub.textures.read(&mut token);
1562 let (mut texture_view_guard, mut token) = hub.texture_views.write(&mut token);
1563 let (sampler_guard, _) = hub.samplers.read(&mut token);
1564
1565 //TODO: if multiple command buffers are submitted, we can re-use the last
1566 // native command buffer of the previous chain instead of always creating
1567 // a temporary one, since the chains are not finished.
1568
1569 // finish all the command buffers first
1570 for &cmb_id in command_buffer_ids {
1571 let comb = &mut command_buffer_guard[cmb_id];
1572
1573 if let Some((view_id, fbo)) = comb.used_swap_chain.take() {
1574 match texture_view_guard[view_id.value].inner {
1575 resource::TextureViewInner::Native { .. } => unreachable!(),
1576 resource::TextureViewInner::SwapChain {
1577 ref source_id,
1578 ref mut framebuffers,
1579 ..
1580 } => {
1581 if framebuffers.is_empty() {
1582 let sem = &swap_chain_guard[source_id.value].semaphore;
1583 signal_semaphores.push(sem);
1584 }
1585 framebuffers.push(fbo);
1586 }
1587 };
1588 }
1589
1590 // optimize the tracked states
1591 comb.trackers.optimize();
1592
1593 // update submission IDs
1594 for id in comb.trackers.buffers.used() {
1595 let buffer = &buffer_guard[id];
1596 assert!(buffer.pending_map_operation.is_none());
1597 buffer
1598 .life_guard
1599 .submission_index
1600 .store(submit_index, Ordering::Release);
1601 }
1602 for id in comb.trackers.textures.used() {
1603 texture_guard[id]
1604 .life_guard
1605 .submission_index
1606 .store(submit_index, Ordering::Release);
1607 }
1608 for id in comb.trackers.views.used() {
1609 texture_view_guard[id]
1610 .life_guard
1611 .submission_index
1612 .store(submit_index, Ordering::Release);
1613 }
1614 for id in comb.trackers.bind_groups.used() {
1615 bind_group_guard[id]
1616 .life_guard
1617 .submission_index
1618 .store(submit_index, Ordering::Release);
1619 }
1620 for id in comb.trackers.samplers.used() {
1621 sampler_guard[id]
1622 .life_guard
1623 .submission_index
1624 .store(submit_index, Ordering::Release);
1625 }
1626
1627 // execute resource transitions
1628 let mut transit = device.com_allocator.extend(comb);
1629 unsafe {
1630 transit.begin(
1631 hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
1632 hal::command::CommandBufferInheritanceInfo::default(),
1633 );
1634 }
1635 log::trace!("Stitching command buffer {:?} before submission", cmb_id);
1636 command::CommandBuffer::insert_barriers(
1637 &mut transit,
1638 &mut *trackers,
1639 &comb.trackers,
1640 Stitch::Init,
1641 &*buffer_guard,
1642 &*texture_guard,
1643 );
1644 unsafe {
1645 transit.finish();
1646 }
1647 comb.raw.insert(0, transit);
1648 unsafe {
1649 comb.raw.last_mut().unwrap().finish();
1650 }
1651 }
1652
1653 // now prepare the GPU submission
1654 let fence = device.raw.create_fence(false).unwrap();
1655 let submission = hal::queue::Submission::<_, _, Vec<&B::Semaphore>> {
1656 command_buffers: command_buffer_ids
1657 .iter()
1658 .flat_map(|&cmb_id| &command_buffer_guard[cmb_id].raw),
1659 wait_semaphores: Vec::new(),
1660 signal_semaphores,
1661 };
1662
1663 unsafe {
1664 device.queue_group.queues[0].submit(submission, Some(&fence));
1665 }
1666
1667 (submit_index, fence)
1668 };
1669
1670 // No need for write access to the device from here on out
1671 let callbacks = {
1672 let mut token = Token::root();
1673 let (device_guard, mut token) = hub.devices.read(&mut token);
1674 let device = &device_guard[queue_id];
1675
1676 let callbacks = device.maintain(global, false, &mut token);
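        // Register this submission as active: its fence, plus any resources and
        // mapped buffers that may get attached later, are released by `maintain`
        // once the fence signals.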
1677 device.pending.lock().active.alloc().init(ActiveSubmission {
1678 index: submit_index,
1679 fence,
1680 resources: Vec::new(),
1681 mapped: Vec::new(),
1682 });
1683
1684 // finally, return the command buffers to the allocator
1685 for &cmb_id in command_buffer_ids {
1686 let (cmd_buf, _) = hub.command_buffers.unregister(cmb_id, &mut token);
1687 device.com_allocator.after_submit(cmd_buf, submit_index);
1688 }
1689
1690 callbacks
1691 };
1692
1693 Device::<B>::fire_map_callbacks(callbacks);
1694 }
1695
1696 #[cfg(feature = "local")]
1697 #[no_mangle]
1698 pub extern "C" fn wgpu_queue_submit(
1699 queue_id: QueueId,
1700 command_buffers: *const CommandBufferId,
1701 command_buffers_length: usize,
1702 ) {
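    // Safety: `command_buffers` must point to `command_buffers_length` valid ids.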
1703 let command_buffer_ids =
1704 unsafe { slice::from_raw_parts(command_buffers, command_buffers_length) };
1705 gfx_select!(queue_id => queue_submit(&*GLOBAL, queue_id, command_buffer_ids))
1706 }
1707
1708 pub fn device_create_render_pipeline<B: GfxBackend>(
1709 global: &Global,
1710 device_id: DeviceId,
1711 desc: &pipeline::RenderPipelineDescriptor,
1712 id_in: Input<RenderPipelineId>,
1713 ) -> Output<RenderPipelineId> {
1714 let hub = B::hub(global);
1715 let mut token = Token::root();
1716
1717 let sc = desc.sample_count;
1718 assert!(
1719 sc == 1 || sc == 2 || sc == 4 || sc == 8 || sc == 16 || sc == 32,
1720 "Invalid sample_count of {}",
1721 sc
1722 );
1723 let sc = sc as u8;
1724
1725 let color_states =
1726 unsafe { slice::from_raw_parts(desc.color_states, desc.color_states_length) };
1727 let depth_stencil_state = unsafe { desc.depth_stencil_state.as_ref() };
1728
1729 let rasterizer = conv::map_rasterization_state_descriptor(
1730 &unsafe { desc.rasterization_state.as_ref() }
1731 .cloned()
1732 .unwrap_or_default(),
1733 );
1734
1735 let desc_vbs = unsafe {
1736 slice::from_raw_parts(
1737 desc.vertex_input.vertex_buffers,
1738 desc.vertex_input.vertex_buffers_length,
1739 )
1740 };
1741 let mut vertex_strides = Vec::with_capacity(desc_vbs.len());
1742 let mut vertex_buffers = Vec::with_capacity(desc_vbs.len());
1743 let mut attributes = Vec::new();
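    // Translate the wgpu vertex-input descriptors into hal vertex buffer and
    // attribute descriptions; strides and step modes are also remembered on the
    // pipeline for use at draw time.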
1744 for (i, vb_state) in desc_vbs.iter().enumerate() {
1745 vertex_strides
1746 .alloc()
1747 .init((vb_state.stride, vb_state.step_mode));
1748 if vb_state.attributes_length == 0 {
1749 continue;
1750 }
1751 vertex_buffers.alloc().init(hal::pso::VertexBufferDesc {
1752 binding: i as u32,
1753 stride: vb_state.stride as u32,
1754 rate: match vb_state.step_mode {
1755 pipeline::InputStepMode::Vertex => hal::pso::VertexInputRate::Vertex,
1756 pipeline::InputStepMode::Instance => hal::pso::VertexInputRate::Instance(1),
1757 },
1758 });
1759 let desc_atts =
1760 unsafe { slice::from_raw_parts(vb_state.attributes, vb_state.attributes_length) };
1761 for attribute in desc_atts {
1762 assert_eq!(0, attribute.offset >> 32);
1763 attributes.alloc().init(hal::pso::AttributeDesc {
1764 location: attribute.shader_location,
1765 binding: i as u32,
1766 element: hal::pso::Element {
1767 format: conv::map_vertex_format(attribute.format),
1768 offset: attribute.offset as u32,
1769 },
1770 });
1771 }
1772 }
1773
1774 let input_assembler = hal::pso::InputAssemblerDesc {
1775 primitive: conv::map_primitive_topology(desc.primitive_topology),
1776 with_adjacency: false,
1777 restart_index: None, //TODO
1778 };
1779
1780 let blender = hal::pso::BlendDesc {
1781 logic_op: None, // TODO
1782 targets: color_states
1783 .iter()
1784 .map(conv::map_color_state_descriptor)
1785 .collect(),
1786 };
1787 let depth_stencil = depth_stencil_state
1788 .map(conv::map_depth_stencil_state_descriptor)
1789 .unwrap_or_default();
1790
1791 let multisampling: Option<hal::pso::Multisampling> = if sc == 1 {
1792 None
1793 } else {
1794 Some(hal::pso::Multisampling {
1795 rasterization_samples: sc,
1796 sample_shading: None,
1797 sample_mask: desc.sample_mask as u64,
1798 alpha_coverage: desc.alpha_to_coverage_enabled,
1799 alpha_to_one: false,
1800 })
1801 };
1802
1803 // TODO
1804 let baked_states = hal::pso::BakedStates {
1805 viewport: None,
1806 scissor: None,
1807 blend_color: None,
1808 depth_bounds: None,
1809 };
1810
1811 let raw_pipeline = {
1812 let (device_guard, mut token) = hub.devices.read(&mut token);
1813 let device = &device_guard[device_id];
1814 let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
1815 let layout = &pipeline_layout_guard[desc.layout].raw;
1816 let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
1817
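        // Build a render-pass compatibility key from the attachment formats and
        // sample count, and create (or fetch from the per-device cache) a matching
        // render pass; it is only needed here to satisfy pipeline creation.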
1818 let rp_key = RenderPassKey {
1819 colors: color_states
1820 .iter()
1821 .map(|at| hal::pass::Attachment {
1822 format: Some(conv::map_texture_format(at.format, device.features)),
1823 samples: sc,
1824 ops: hal::pass::AttachmentOps::PRESERVE,
1825 stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
1826 layouts: hal::image::Layout::General .. hal::image::Layout::General,
1827 })
1828 .collect(),
1829             // We can ignore the resolves, as the Vulkan spec says:
1830 // As an additional special case, if two render passes have a single subpass,
1831 // they are compatible even if they have different resolve attachment references
1832 // or depth/stencil resolve modes but satisfy the other compatibility conditions.
1833 resolves: ArrayVec::new(),
1834 depth_stencil: depth_stencil_state.map(|at| hal::pass::Attachment {
1835 format: Some(conv::map_texture_format(at.format, device.features)),
1836 samples: sc,
1837 ops: hal::pass::AttachmentOps::PRESERVE,
1838 stencil_ops: hal::pass::AttachmentOps::PRESERVE,
1839 layouts: hal::image::Layout::General .. hal::image::Layout::General,
1840 }),
1841 };
1842
1843 let mut render_pass_cache = device.render_passes.lock();
1844 let main_pass = match render_pass_cache.entry(rp_key) {
1845 Entry::Occupied(e) => e.into_mut(),
1846 Entry::Vacant(e) => {
1847 let color_ids = [
1848 (0, hal::image::Layout::ColorAttachmentOptimal),
1849 (1, hal::image::Layout::ColorAttachmentOptimal),
1850 (2, hal::image::Layout::ColorAttachmentOptimal),
1851 (3, hal::image::Layout::ColorAttachmentOptimal),
1852 ];
1853
1854 let depth_id = (
1855 desc.color_states_length,
1856 hal::image::Layout::DepthStencilAttachmentOptimal,
1857 );
1858
1859 let subpass = hal::pass::SubpassDesc {
1860 colors: &color_ids[.. desc.color_states_length],
1861 depth_stencil: depth_stencil_state.map(|_| &depth_id),
1862 inputs: &[],
1863 resolves: &[],
1864 preserves: &[],
1865 };
1866
1867 let pass = unsafe {
1868 device
1869 .raw
1870 .create_render_pass(e.key().all(), &[subpass], &[])
1871 }
1872 .unwrap();
1873 e.insert(pass)
1874 }
1875 };
1876
1877 let vertex = hal::pso::EntryPoint::<B> {
1878 entry: unsafe { ffi::CStr::from_ptr(desc.vertex_stage.entry_point) }
1879 .to_str()
1880 .to_owned()
1881 .unwrap(), // TODO
1882 module: &shader_module_guard[desc.vertex_stage.module].raw,
1883 specialization: hal::pso::Specialization::EMPTY,
1884 };
1885 let fragment =
1886 unsafe { desc.fragment_stage.as_ref() }.map(|stage| hal::pso::EntryPoint::<B> {
1887 entry: unsafe { ffi::CStr::from_ptr(stage.entry_point) }
1888 .to_str()
1889 .to_owned()
1890 .unwrap(), // TODO
1891 module: &shader_module_guard[stage.module].raw,
1892 specialization: hal::pso::Specialization::EMPTY,
1893 });
1894
1895 let shaders = hal::pso::GraphicsShaderSet {
1896 vertex,
1897 hull: None,
1898 domain: None,
1899 geometry: None,
1900 fragment,
1901 };
1902
1903 let subpass = hal::pass::Subpass {
1904 index: 0,
1905 main_pass,
1906 };
1907
1908 // TODO
1909 let flags = hal::pso::PipelineCreationFlags::empty();
1910 // TODO
1911 let parent = hal::pso::BasePipeline::None;
1912
1913 let pipeline_desc = hal::pso::GraphicsPipelineDesc {
1914 shaders,
1915 rasterizer,
1916 vertex_buffers,
1917 attributes,
1918 input_assembler,
1919 blender,
1920 depth_stencil,
1921 multisampling,
1922 baked_states,
1923 layout,
1924 subpass,
1925 flags,
1926 parent,
1927 };
1928
1929 // TODO: cache
1930 unsafe {
1931 device
1932 .raw
1933 .create_graphics_pipeline(&pipeline_desc, None)
1934 .unwrap()
1935 }
1936 };
1937
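    // Remember the attachment formats this pipeline was created against, so that
    // render passes it is used in can be checked for compatibility.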
1938 let pass_context = RenderPassContext {
1939 colors: color_states.iter().map(|state| state.format).collect(),
1940 resolves: ArrayVec::new(),
1941 depth_stencil: depth_stencil_state.map(|state| state.format),
1942 };
1943
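    // Flag the dynamic state this pipeline depends on: a blend color if any color
    // target reads it, and a stencil reference if the depth-stencil state needs one.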
1944 let mut flags = pipeline::PipelineFlags::empty();
1945 for state in color_states {
1946 if state.color_blend.uses_color() | state.alpha_blend.uses_color() {
1947 flags |= pipeline::PipelineFlags::BLEND_COLOR;
1948 }
1949 }
1950 if let Some(ds) = depth_stencil_state {
1951 if ds.needs_stencil_reference() {
1952 flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
1953 }
1954 }
1955
1956 let pipeline = pipeline::RenderPipeline {
1957 raw: raw_pipeline,
1958 layout_id: desc.layout,
1959 pass_context,
1960 flags,
1961 index_format: desc.vertex_input.index_format,
1962 vertex_strides,
1963 sample_count: sc,
1964 };
1965
1966 hub.render_pipelines
1967 .register_identity(id_in, pipeline, &mut token)
1968 }
1969
1970 #[cfg(feature = "local")]
1971 #[no_mangle]
1972 pub extern "C" fn wgpu_device_create_render_pipeline(
1973 device_id: DeviceId,
1974 desc: &pipeline::RenderPipelineDescriptor,
1975 ) -> RenderPipelineId {
1976 gfx_select!(device_id => device_create_render_pipeline(&*GLOBAL, device_id, desc, PhantomData))
1977 }
1978
1979 pub fn device_create_compute_pipeline<B: GfxBackend>(
1980 global: &Global,
1981 device_id: DeviceId,
1982 desc: &pipeline::ComputePipelineDescriptor,
1983 id_in: Input<ComputePipelineId>,
1984 ) -> Output<ComputePipelineId> {
1985 let hub = B::hub(global);
1986 let mut token = Token::root();
1987
1988 let raw_pipeline = {
1989 let (device_guard, mut token) = hub.devices.read(&mut token);
1990 let device = &device_guard[device_id].raw;
1991 let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
1992 let layout = &pipeline_layout_guard[desc.layout].raw;
1993 let pipeline_stage = &desc.compute_stage;
1994 let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
1995
1996 let shader = hal::pso::EntryPoint::<B> {
1997 entry: unsafe { ffi::CStr::from_ptr(pipeline_stage.entry_point) }
1998 .to_str()
1999 .to_owned()
2000 .unwrap(), // TODO
2001 module: &shader_module_guard[pipeline_stage.module].raw,
2002 specialization: hal::pso::Specialization::EMPTY,
2003 };
2004
2005 // TODO
2006 let flags = hal::pso::PipelineCreationFlags::empty();
2007 // TODO
2008 let parent = hal::pso::BasePipeline::None;
2009
2010 let pipeline_desc = hal::pso::ComputePipelineDesc {
2011 shader,
2012 layout,
2013 flags,
2014 parent,
2015 };
2016
2017 unsafe {
2018 device
2019 .create_compute_pipeline(&pipeline_desc, None)
2020 .unwrap()
2021 }
2022 };
2023
2024 let pipeline = pipeline::ComputePipeline {
2025 raw: raw_pipeline,
2026 layout_id: desc.layout,
2027 };
2028 hub.compute_pipelines
2029 .register_identity(id_in, pipeline, &mut token)
2030 }
2031
2032 #[cfg(feature = "local")]
2033 #[no_mangle]
2034 pub extern "C" fn wgpu_device_create_compute_pipeline(
2035 device_id: DeviceId,
2036 desc: &pipeline::ComputePipelineDescriptor,
2037 ) -> ComputePipelineId {
2038 gfx_select!(device_id => device_create_compute_pipeline(&*GLOBAL, device_id, desc, PhantomData))
2039 }
2040
2041 pub fn device_create_swap_chain<B: GfxBackend>(
2042 global: &Global,
2043 device_id: DeviceId,
2044 surface_id: SurfaceId,
2045 desc: &swap_chain::SwapChainDescriptor,
2046 ) -> SwapChainId {
2047 log::info!("creating swap chain {:?}", desc);
2048 let hub = B::hub(global);
2049 let mut token = Token::root();
2050
2051 let (mut surface_guard, mut token) = global.surfaces.write(&mut token);
2052 let (adapter_guard, mut token) = hub.adapters.read(&mut token);
2053 let (device_guard, mut token) = hub.devices.read(&mut token);
2054 let (mut swap_chain_guard, _) = hub.swap_chains.write(&mut token);
2055 let device = &device_guard[device_id];
2056 let surface = &mut surface_guard[surface_id];
2057
2058 let (caps, formats) = {
2059 let suf = B::get_surface_mut(surface);
2060 let adapter = &adapter_guard[device.adapter_id];
2061 assert!(suf.supports_queue_family(&adapter.raw.queue_families[0]));
2062 let formats = suf.supported_formats(&adapter.raw.physical_device);
2063 let caps = suf.capabilities(&adapter.raw.physical_device);
2064 (caps, formats)
2065 };
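    // Clamp the preferred frame count to what the surface actually supports.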
2066 let num_frames = swap_chain::DESIRED_NUM_FRAMES
2067 .max(*caps.image_count.start())
2068 .min(*caps.image_count.end());
2069 let config = desc.to_hal(num_frames, &device.features);
2070
2071 if let Some(formats) = formats {
2072 assert!(
2073 formats.contains(&config.format),
2074 "Requested format {:?} is not in supported list: {:?}",
2075 config.format,
2076 formats
2077 );
2078 }
2079 if desc.width < caps.extents.start().width
2080 || desc.width > caps.extents.end().width
2081 || desc.height < caps.extents.start().height
2082 || desc.height > caps.extents.end().height
2083 {
2084 log::warn!(
2085 "Requested size {}x{} is outside of the supported range: {:?}",
2086 desc.width,
2087 desc.height,
2088 caps.extents
2089 );
2090 }
2091
2092 unsafe {
2093 B::get_surface_mut(surface)
2094 .configure_swapchain(&device.raw, config)
2095 .unwrap();
2096 }
2097
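    // A surface owns at most one swap chain: recreating it replaces the old entry,
    // whose semaphore must be destroyed manually.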
2098 let sc_id = surface_id.to_swap_chain_id(B::VARIANT);
2099 if let Some(sc) = swap_chain_guard.remove(sc_id) {
2100 unsafe {
2101 device.raw.destroy_semaphore(sc.semaphore);
2102 }
2103 }
2104 let swap_chain = swap_chain::SwapChain {
2105 life_guard: LifeGuard::new(),
2106 device_id: Stored {
2107 value: device_id,
2108 ref_count: device.life_guard.ref_count.clone(),
2109 },
2110 desc: desc.clone(),
2111 num_frames,
2112 semaphore: device.raw.create_semaphore().unwrap(),
2113 acquired_view_id: None,
2114 };
2115 swap_chain_guard.insert(sc_id, swap_chain);
2116 sc_id
2117 }
2118
2119 #[cfg(feature = "local")]
2120 #[no_mangle]
2121 pub extern "C" fn wgpu_device_create_swap_chain(
2122 device_id: DeviceId,
2123 surface_id: SurfaceId,
2124 desc: &swap_chain::SwapChainDescriptor,
2125 ) -> SwapChainId {
2126 gfx_select!(device_id => device_create_swap_chain(&*GLOBAL, device_id, surface_id, desc))
2127 }
2128
2129 pub fn device_poll<B: GfxBackend>(global: &Global, device_id: DeviceId, force_wait: bool) {
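    // Collect any ready map callbacks while holding the device read lock, then fire
    // them after the lock is released.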
2130 let hub = B::hub(global);
2131 let callbacks = {
2132 let (device_guard, mut token) = hub.devices.read(&mut Token::root());
2133 device_guard[device_id].maintain(global, force_wait, &mut token)
2134 };
2135 Device::<B>::fire_map_callbacks(callbacks);
2136 }
2137
2138 #[cfg(feature = "local")]
2139 #[no_mangle]
2140 pub extern "C" fn wgpu_device_poll(device_id: DeviceId, force_wait: bool) {
2141 gfx_select!(device_id => device_poll(&*GLOBAL, device_id, force_wait))
2142 }
2143
2144 pub fn device_destroy<B: GfxBackend>(global: &Global, device_id: DeviceId) {
2145 let hub = B::hub(global);
2146 let (device, mut token) = hub.devices.unregister(device_id, &mut Token::root());
2147 device.maintain(global, true, &mut token);
2148 device.com_allocator.destroy(&device.raw);
2149 }
2150
2151 #[cfg(feature = "local")]
2152 #[no_mangle]
2153 pub extern "C" fn wgpu_device_destroy(device_id: DeviceId) {
2154 gfx_select!(device_id => device_destroy(&*GLOBAL, device_id))
2155 }
2156
2157 pub type BufferMapReadCallback =
2158 extern "C" fn(status: BufferMapAsyncStatus, data: *const u8, userdata: *mut u8);
2159 pub type BufferMapWriteCallback =
2160 extern "C" fn(status: BufferMapAsyncStatus, data: *mut u8, userdata: *mut u8);
2161
2162 pub fn buffer_map_async<B: GfxBackend>(
2163 global: &Global,
2164 buffer_id: BufferId,
2165 usage: resource::BufferUsage,
2166 operation: BufferMapOperation,
2167 ) {
2168 let hub = B::hub(global);
2169 let mut token = Token::root();
2170 let (device_guard, mut token) = hub.devices.read(&mut token);
2171
2172 let (device_id, ref_count) = {
2173 let (mut buffer_guard, _) = hub.buffers.write(&mut token);
2174 let buffer = &mut buffer_guard[buffer_id];
2175
2176 if usage.contains(resource::BufferUsage::MAP_READ) {
2177 assert!(buffer.usage.contains(resource::BufferUsage::MAP_READ));
2178 }
2179
2180 if usage.contains(resource::BufferUsage::MAP_WRITE) {
2181 assert!(buffer.usage.contains(resource::BufferUsage::MAP_WRITE));
2182 }
2183
2184 if buffer.pending_map_operation.is_some() {
2185 operation.call_error();
2186 return;
2187 }
2188
2189 buffer.pending_map_operation = Some(operation);
2190 (buffer.device_id.value, buffer.life_guard.ref_count.clone())
2191 };
2192
2193 let device = &device_guard[device_id];
2194
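    // Mapping is deferred: switch the buffer's tracked usage so the right barriers
    // are inserted, and queue it on the pending list; the callback fires later from
    // `maintain` once it is safe to map the buffer.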
2195 device
2196 .trackers
2197 .lock()
2198 .buffers
2199 .change_replace(buffer_id, &ref_count, (), usage);
2200
2201 device.pending.lock().map(buffer_id, ref_count);
2202 }
2203
2204 #[cfg(feature = "local")]
2205 #[no_mangle]
2206 pub extern "C" fn wgpu_buffer_map_read_async(
2207 buffer_id: BufferId,
2208 start: BufferAddress,
2209 size: BufferAddress,
2210 callback: BufferMapReadCallback,
2211 userdata: *mut u8,
2212 ) {
2213 let operation = BufferMapOperation::Read(start .. start + size, callback, userdata);
2214 gfx_select!(buffer_id => buffer_map_async(&*GLOBAL, buffer_id, resource::BufferUsage::MAP_READ, operation))
2215 }
2216
2217 #[cfg(feature = "local")]
2218 #[no_mangle]
2219 pub extern "C" fn wgpu_buffer_map_write_async(
2220 buffer_id: BufferId,
2221 start: BufferAddress,
2222 size: BufferAddress,
2223 callback: BufferMapWriteCallback,
2224 userdata: *mut u8,
2225 ) {
2226 let operation = BufferMapOperation::Write(start .. start + size, callback, userdata);
2227 gfx_select!(buffer_id => buffer_map_async(&*GLOBAL, buffer_id, resource::BufferUsage::MAP_WRITE, operation))
2228 }
2229
2230 pub fn buffer_unmap<B: GfxBackend>(global: &Global, buffer_id: BufferId) {
2231 let hub = B::hub(global);
2232 let mut token = Token::root();
2233
2234 let (device_guard, mut token) = hub.devices.read(&mut token);
2235 let (mut buffer_guard, _) = hub.buffers.write(&mut token);
2236
2237 let buffer = &mut buffer_guard[buffer_id];
2238 let device_raw = &device_guard[buffer.device_id.value].raw;
2239
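    // If the buffer was mapped for writing, flush the recorded ranges so the writes
    // are visible to the device (required for non-coherent memory) before unmapping.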
2240 if !buffer.mapped_write_ranges.is_empty() {
2241 unsafe {
2242 device_raw
2243 .flush_mapped_memory_ranges(
2244 buffer
2245 .mapped_write_ranges
2246 .iter()
2247 .map(|r| (buffer.memory.memory(), r.clone())),
2248 )
2249 .unwrap()
2250 };
2251 buffer.mapped_write_ranges.clear();
2252 }
2253
2254 buffer.memory.unmap(device_raw);
2255 }
2256
2257 #[cfg(feature = "local")]
2258 #[no_mangle]
2259 pub extern "C" fn wgpu_buffer_unmap(buffer_id: BufferId) {
2260 gfx_select!(buffer_id => buffer_unmap(&*GLOBAL, buffer_id))
2261 }
2262