#[test]
#[cfg(any(feature = "vulkan", feature = "metal", feature = "dx12"))]
fn multithreaded_compute() {
    use std::sync::mpsc;
    use std::thread;
    use std::time::Duration;

    let thread_count = 8;

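    // Each thread drives its own compute job end to end; the channel is only
    // used to signal completion back to the test thread.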
    let (tx, rx) = mpsc::channel();
    for _ in 0 .. thread_count {
        let tx = tx.clone();
        thread::spawn(move || {
            let numbers = vec![100, 100, 100];

            let size = (numbers.len() * std::mem::size_of::<u32>()) as wgpu::BufferAddress;

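            // Each thread requests its own adapter and device; nothing GPU-related
            // is shared between threads.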
            let instance = wgpu::Instance::new();
            let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::Default,
            });

            let mut device = adapter.request_device(&wgpu::DeviceDescriptor {
                extensions: wgpu::Extensions {
                    anisotropic_filtering: false,
                },
                limits: wgpu::Limits::default(),
            });

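            // Load the precompiled SPIR-V compute shader from the hello-compute example.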
            let cs = include_bytes!("../examples/hello-compute/shader.comp.spv");
            let cs_module = device
                .create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&cs[..])).unwrap());

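            // The staging buffer is CPU-mappable; it starts out filled with the input
            // values and is later used to read the results back.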
            let staging_buffer = device
                .create_buffer_mapped(
                    numbers.len(),
                    wgpu::BufferUsage::MAP_READ
                        | wgpu::BufferUsage::COPY_DST
                        | wgpu::BufferUsage::COPY_SRC,
                )
                .fill_from_slice(&numbers);

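            // The storage buffer is the one the shader actually reads and writes.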
            let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                size,
                usage: wgpu::BufferUsage::STORAGE
                    | wgpu::BufferUsage::COPY_DST
                    | wgpu::BufferUsage::COPY_SRC,
            });

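            // The layout declares a single read-write storage buffer at binding 0,
            // visible to the compute stage.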
            let bind_group_layout =
                device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    bindings: &[wgpu::BindGroupLayoutBinding {
                        binding: 0,
                        visibility: wgpu::ShaderStage::COMPUTE,
                        ty: wgpu::BindingType::StorageBuffer {
                            dynamic: false,
                            readonly: false,
                        },
                    }],
                });

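            // Bind the storage buffer to binding 0 so the shader can access it.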
            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout: &bind_group_layout,
                bindings: &[wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer {
                        buffer: &storage_buffer,
                        range: 0 .. size,
                    },
                }],
            });

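            // Build the compute pipeline around the shader's "main" entry point.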
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                bind_group_layouts: &[&bind_group_layout],
            });

            let compute_pipeline =
                device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                    layout: &pipeline_layout,
                    compute_stage: wgpu::ProgrammableStageDescriptor {
                        module: &cs_module,
                        entry_point: "main",
                    },
                });

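            // Record the work: upload the inputs, dispatch one workgroup per input value,
            // then copy the results back into the mappable staging buffer.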
            let mut encoder =
                device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
            encoder.copy_buffer_to_buffer(&staging_buffer, 0, &storage_buffer, 0, size);
            {
                let mut cpass = encoder.begin_compute_pass();
                cpass.set_pipeline(&compute_pipeline);
                cpass.set_bind_group(0, &bind_group, &[]);
                cpass.dispatch(numbers.len() as u32, 1, 1);
            }
            encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);

            device.get_queue().submit(&[encoder.finish()]);

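            // Read the results back; the shader is expected to turn each input of 100
            // into 25 (the Collatz step count computed by the hello-compute shader).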
            staging_buffer.map_read_async(0, size, |result: wgpu::BufferMapAsyncResult<&[u32]>| {
                assert_eq!(result.unwrap().data, [25, 25, 25]);
            });
            tx.send(true).unwrap();
        });
    }

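    // Fail the test if any thread does not report completion within ten seconds.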
    for _ in 0 .. thread_count {
        rx.recv_timeout(Duration::from_secs(10))
            .expect("A thread never completed.");
    }
}