1 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 #include "WebGPUParent.h"
7 #include "mozilla/webgpu/ffi/wgpu.h"
8 #include "mozilla/layers/ImageDataSerializer.h"
9 #include "mozilla/layers/TextureHost.h"
10
11 namespace mozilla {
12 namespace webgpu {
13
// Interval (in milliseconds) between device-maintenance polls; see
// WebGPUParent::MaintainDevices. `constexpr` (not just `const`) so it is a
// guaranteed compile-time constant.
constexpr uint64_t POLL_TIME_MS = 100;
15
// Shared bookkeeping for one swap-chain presentation target. Instances are
// refcounted: the canvas map holds one reference, and every in-flight
// buffer-map callback holds another (see RecvSwapChainPresent /
// PresentCallback).
class PresentationData {
  NS_INLINE_DECL_REFCOUNTING(PresentationData);

 public:
  RawId mDeviceId = 0;
  RawId mQueueId = 0;
  // CPU-side texture that WebRender reads the presented frame from.
  RefPtr<layers::MemoryTextureHost> mTextureHost;
  // Bytes per row of the GPU readback staging buffers (aligned stride).
  uint32_t mSourcePitch = 0;
  // Bytes per row of the texture host memory (packed RGB stride).
  uint32_t mTargetPitch = 0;
  uint32_t mRowCount = 0;
  // IDs reserved by the child for which no server-side buffer exists yet.
  std::vector<RawId> mUnassignedBufferIds;
  // Buffers created and ready to receive the next frame copy.
  std::vector<RawId> mAvailableBufferIds;
  // Buffers with a copy submitted, waiting on their map-read callback.
  std::vector<RawId> mQueuedBufferIds;
  // Guards the three ID vectors above. NOTE(review): presumably the map
  // callback can fire on a different thread than the IPC handlers — confirm.
  Mutex mBuffersLock;

  PresentationData() : mBuffersLock("WebGPU presentation buffers") {
    MOZ_COUNT_CTOR(PresentationData);
  }

 private:
  ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
};
38
FreeAdapter(RawId id,void * param)39 static void FreeAdapter(RawId id, void* param) {
40 if (!static_cast<WebGPUParent*>(param)->SendFreeAdapter(id)) {
41 MOZ_CRASH("IPC failure");
42 }
43 }
FreeDevice(RawId id,void * param)44 static void FreeDevice(RawId id, void* param) {
45 if (!static_cast<WebGPUParent*>(param)->SendFreeDevice(id)) {
46 MOZ_CRASH("IPC failure");
47 }
48 }
FreeSwapChain(RawId id,void * param)49 static void FreeSwapChain(RawId id, void* param) {
50 Unused << id;
51 Unused << param;
52 }
FreePipelineLayout(RawId id,void * param)53 static void FreePipelineLayout(RawId id, void* param) {
54 if (!static_cast<WebGPUParent*>(param)->SendFreePipelineLayout(id)) {
55 MOZ_CRASH("IPC failure");
56 }
57 }
FreeShaderModule(RawId id,void * param)58 static void FreeShaderModule(RawId id, void* param) {
59 if (!static_cast<WebGPUParent*>(param)->SendFreeShaderModule(id)) {
60 MOZ_CRASH("IPC failure");
61 }
62 }
FreeBindGroupLayout(RawId id,void * param)63 static void FreeBindGroupLayout(RawId id, void* param) {
64 if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroupLayout(id)) {
65 MOZ_CRASH("IPC failure");
66 }
67 }
FreeBindGroup(RawId id,void * param)68 static void FreeBindGroup(RawId id, void* param) {
69 if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroup(id)) {
70 MOZ_CRASH("IPC failure");
71 }
72 }
FreeCommandBuffer(RawId id,void * param)73 static void FreeCommandBuffer(RawId id, void* param) {
74 if (!static_cast<WebGPUParent*>(param)->SendFreeCommandBuffer(id)) {
75 MOZ_CRASH("IPC failure");
76 }
77 }
FreeRenderPipeline(RawId id,void * param)78 static void FreeRenderPipeline(RawId id, void* param) {
79 if (!static_cast<WebGPUParent*>(param)->SendFreeRenderPipeline(id)) {
80 MOZ_CRASH("IPC failure");
81 }
82 }
FreeComputePipeline(RawId id,void * param)83 static void FreeComputePipeline(RawId id, void* param) {
84 if (!static_cast<WebGPUParent*>(param)->SendFreeComputePipeline(id)) {
85 MOZ_CRASH("IPC failure");
86 }
87 }
FreeBuffer(RawId id,void * param)88 static void FreeBuffer(RawId id, void* param) {
89 if (!static_cast<WebGPUParent*>(param)->SendFreeBuffer(id)) {
90 MOZ_CRASH("IPC failure");
91 }
92 }
FreeTexture(RawId id,void * param)93 static void FreeTexture(RawId id, void* param) {
94 if (!static_cast<WebGPUParent*>(param)->SendFreeTexture(id)) {
95 MOZ_CRASH("IPC failure");
96 }
97 }
FreeTextureView(RawId id,void * param)98 static void FreeTextureView(RawId id, void* param) {
99 if (!static_cast<WebGPUParent*>(param)->SendFreeTextureView(id)) {
100 MOZ_CRASH("IPC failure");
101 }
102 }
FreeSampler(RawId id,void * param)103 static void FreeSampler(RawId id, void* param) {
104 if (!static_cast<WebGPUParent*>(param)->SendFreeSampler(id)) {
105 MOZ_CRASH("IPC failure");
106 }
107 }
FreeSurface(RawId id,void * param)108 static void FreeSurface(RawId id, void* param) {
109 Unused << id;
110 Unused << param;
111 }
112
MakeFactory(void * param)113 static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
114 // Note: careful about the order here!
115 const ffi::WGPUIdentityRecyclerFactory factory = {
116 param,
117 FreeAdapter,
118 FreeDevice,
119 FreeSwapChain,
120 FreePipelineLayout,
121 FreeShaderModule,
122 FreeBindGroupLayout,
123 FreeBindGroup,
124 FreeCommandBuffer,
125 FreeRenderPipeline,
126 FreeComputePipeline,
127 FreeBuffer,
128 FreeTexture,
129 FreeTextureView,
130 FreeSampler,
131 FreeSurface,
132 };
133 return factory;
134 }
135
// Creates the server-side wgpu context and starts the periodic
// device-maintenance timer.
WebGPUParent::WebGPUParent()
    : mContext(ffi::wgpu_server_new(MakeFactory(this))) {
  mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
               &WebGPUParent::MaintainDevices);
}

WebGPUParent::~WebGPUParent() = default;

// Timer callback: lets every wgpu device make progress on completed async
// work. The `false` presumably requests a non-blocking poll (cf. the `true`
// used in RecvShutdown) — confirm against the FFI header.
void WebGPUParent::MaintainDevices() {
  ffi::wgpu_server_poll_all_devices(mContext, false);
}
147
RecvInstanceRequestAdapter(const dom::GPURequestAdapterOptions & aOptions,const nsTArray<RawId> & aTargetIds,InstanceRequestAdapterResolver && resolver)148 ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
149 const dom::GPURequestAdapterOptions& aOptions,
150 const nsTArray<RawId>& aTargetIds,
151 InstanceRequestAdapterResolver&& resolver) {
152 ffi::WGPURequestAdapterOptions options = {};
153 if (aOptions.mPowerPreference.WasPassed()) {
154 options.power_preference = static_cast<ffi::WGPUPowerPreference>(
155 aOptions.mPowerPreference.Value());
156 }
157 // TODO: make available backends configurable by prefs
158
159 int8_t index = ffi::wgpu_server_instance_request_adapter(
160 mContext, &options, aTargetIds.Elements(), aTargetIds.Length());
161 if (index >= 0) {
162 resolver(aTargetIds[index]);
163 } else {
164 resolver(0);
165 }
166
167 // free the unused IDs
168 for (size_t i = 0; i < aTargetIds.Length(); ++i) {
169 if (static_cast<int8_t>(i) != index && !SendFreeAdapter(aTargetIds[i])) {
170 MOZ_CRASH("IPC failure");
171 }
172 }
173 return IPC_OK();
174 }
175
RecvAdapterRequestDevice(RawId aSelfId,const dom::GPUDeviceDescriptor & aDesc,RawId aNewId)176 ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
177 RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) {
178 ffi::WGPUDeviceDescriptor desc = {};
179 desc.limits.max_bind_groups = aDesc.mLimits.WasPassed()
180 ? aDesc.mLimits.Value().mMaxBindGroups
181 : WGPUDEFAULT_BIND_GROUPS;
182 // TODO: fill up the descriptor
183 ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId);
184 return IPC_OK();
185 }
186
// Destroys the server-side adapter identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aSelfId) {
  ffi::wgpu_server_adapter_destroy(mContext, aSelfId);
  return IPC_OK();
}

// Destroys the server-side device identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
  ffi::wgpu_server_device_destroy(mContext, aSelfId);
  return IPC_OK();
}
196
RecvDeviceCreateBuffer(RawId aSelfId,const ffi::WGPUBufferDescriptor & aDesc,const nsCString & aLabel,RawId aNewId)197 ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer(
198 RawId aSelfId, const ffi::WGPUBufferDescriptor& aDesc,
199 const nsCString& aLabel, RawId aNewId) {
200 ffi::WGPUBufferDescriptor desc = aDesc;
201 if (!aLabel.IsEmpty()) {
202 desc.label = aLabel.Data();
203 }
204 ffi::wgpu_server_device_create_buffer(mContext, aSelfId, &desc, aNewId);
205 return IPC_OK();
206 }
207
// Finishes a buffer mapping. With `aFlush` set, the shmem contents are
// uploaded into the buffer via a sub-data write — presumably the
// create-buffer-mapped path; confirm against the child side. Otherwise the
// buffer is simply unmapped. The shmem is deallocated in both cases.
ipc::IPCResult WebGPUParent::RecvDeviceUnmapBuffer(RawId aSelfId,
                                                   RawId aBufferId,
                                                   Shmem&& aShmem,
                                                   bool aFlush) {
  if (aFlush) {
    // NOTE(review): this path never calls wgpu_server_buffer_unmap —
    // confirm the buffer is not actually mapped server-side in this case.
    ffi::wgpu_server_device_set_buffer_sub_data(mContext, aSelfId, aBufferId, 0,
                                                aShmem.get<uint8_t>(),
                                                aShmem.Size<uint8_t>());
  } else {
    ffi::wgpu_server_buffer_unmap(mContext, aBufferId);
  }
  DeallocShmem(aShmem);
  return IPC_OK();
}
222
223 struct MapReadRequest {
224 ipc::Shmem mShmem;
225 WebGPUParent::BufferMapReadResolver mResolver;
MapReadRequestmozilla::webgpu::MapReadRequest226 MapReadRequest(ipc::Shmem&& shmem,
227 WebGPUParent::BufferMapReadResolver&& resolver)
228 : mShmem(shmem), mResolver(resolver) {}
229 };
230
MapReadCallback(ffi::WGPUBufferMapAsyncStatus status,const uint8_t * ptr,uint8_t * userdata)231 static void MapReadCallback(ffi::WGPUBufferMapAsyncStatus status,
232 const uint8_t* ptr, uint8_t* userdata) {
233 auto req = reinterpret_cast<MapReadRequest*>(userdata);
234 // TODO: better handle errors
235 MOZ_ASSERT(status == ffi::WGPUBufferMapAsyncStatus_Success);
236 memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
237 req->mResolver(std::move(req->mShmem));
238 delete req;
239 }
240
// Starts an async read-back of the whole buffer into `aShmem`. Ownership of
// the shmem and resolver moves into a heap-allocated MapReadRequest, which
// MapReadCallback deletes when the map completes.
ipc::IPCResult WebGPUParent::RecvBufferMapRead(
    RawId aSelfId, Shmem&& aShmem, BufferMapReadResolver&& aResolver) {
  // Capture the size before `aShmem` is moved from.
  auto size = aShmem.Size<uint8_t>();
  auto request = new MapReadRequest(std::move(aShmem), std::move(aResolver));
  ffi::wgpu_server_buffer_map_read(mContext, aSelfId, 0, size, &MapReadCallback,
                                   reinterpret_cast<uint8_t*>(request));
  return IPC_OK();
}
249
// Destroys the server-side buffer identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
  ffi::wgpu_server_buffer_destroy(mContext, aSelfId);
  return IPC_OK();
}
254
RecvDeviceCreateTexture(RawId aSelfId,const ffi::WGPUTextureDescriptor & aDesc,const nsCString & aLabel,RawId aNewId)255 ipc::IPCResult WebGPUParent::RecvDeviceCreateTexture(
256 RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc,
257 const nsCString& aLabel, RawId aNewId) {
258 ffi::WGPUTextureDescriptor desc = aDesc;
259 if (!aLabel.IsEmpty()) {
260 desc.label = aLabel.Data();
261 }
262 ffi::wgpu_server_device_create_texture(mContext, aSelfId, &desc, aNewId);
263 return IPC_OK();
264 }
265
RecvTextureCreateView(RawId aSelfId,const ffi::WGPUTextureViewDescriptor & aDesc,const nsCString & aLabel,RawId aNewId)266 ipc::IPCResult WebGPUParent::RecvTextureCreateView(
267 RawId aSelfId, const ffi::WGPUTextureViewDescriptor& aDesc,
268 const nsCString& aLabel, RawId aNewId) {
269 ffi::WGPUTextureViewDescriptor desc = aDesc;
270 if (!aLabel.IsEmpty()) {
271 desc.label = aLabel.Data();
272 }
273 ffi::wgpu_server_texture_create_view(mContext, aSelfId, &desc, aNewId);
274 return IPC_OK();
275 }
276
// Destroys the server-side texture identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aSelfId) {
  ffi::wgpu_server_texture_destroy(mContext, aSelfId);
  return IPC_OK();
}

// Destroys the server-side texture view identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
  ffi::wgpu_server_texture_view_destroy(mContext, aSelfId);
  return IPC_OK();
}
286
RecvDeviceCreateSampler(RawId aSelfId,const ffi::WGPUSamplerDescriptor & aDesc,const nsCString & aLabel,RawId aNewId)287 ipc::IPCResult WebGPUParent::RecvDeviceCreateSampler(
288 RawId aSelfId, const ffi::WGPUSamplerDescriptor& aDesc,
289 const nsCString& aLabel, RawId aNewId) {
290 ffi::WGPUSamplerDescriptor desc = aDesc;
291 if (!aLabel.IsEmpty()) {
292 desc.label = aLabel.Data();
293 }
294 ffi::wgpu_server_device_create_sampler(mContext, aSelfId, &desc, aNewId);
295 return IPC_OK();
296 }
297
// Destroys the server-side sampler identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSelfId) {
  ffi::wgpu_server_sampler_destroy(mContext, aSelfId);
  return IPC_OK();
}
302
// Creates a command encoder under `aNewId`. The label from the DOM
// descriptor is not plumbed through yet (see the TODO).
ipc::IPCResult WebGPUParent::RecvDeviceCreateCommandEncoder(
    RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc,
    RawId aNewId) {
  ffi::WGPUCommandEncoderDescriptor desc = {};
  if (aDesc.mLabel.WasPassed()) {
    // TODO: desc.label = aDesc.mLabel.Value();
  }
  ffi::wgpu_server_device_create_encoder(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}
313
// Records a buffer-to-buffer copy into the command encoder `aSelfId`.
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToBuffer(
    RawId aSelfId, RawId aSourceId, BufferAddress aSourceOffset,
    RawId aDestinationId, BufferAddress aDestinationOffset,
    BufferAddress aSize) {
  ffi::wgpu_server_encoder_copy_buffer_to_buffer(mContext, aSelfId, aSourceId,
                                                 aSourceOffset, aDestinationId,
                                                 aDestinationOffset, aSize);
  return IPC_OK();
}
323
// Records a buffer-to-texture copy into the command encoder `aSelfId`.
// The copy views are taken by value, so passing their addresses to the FFI
// is safe for the duration of the call.
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToTexture(
    RawId aSelfId, WGPUBufferCopyView aSource, WGPUTextureCopyView aDestination,
    WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_buffer_to_texture(mContext, aSelfId, &aSource,
                                                  &aDestination, aCopySize);
  return IPC_OK();
}
// Records a texture-to-buffer copy into the command encoder `aSelfId`.
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToBuffer(
    RawId aSelfId, WGPUTextureCopyView aSource, WGPUBufferCopyView aDestination,
    WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aSelfId, &aSource,
                                                  &aDestination, aCopySize);
  return IPC_OK();
}
// Records a texture-to-texture copy into the command encoder `aSelfId`.
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToTexture(
    RawId aSelfId, WGPUTextureCopyView aSource,
    WGPUTextureCopyView aDestination, WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_texture_to_texture(mContext, aSelfId, &aSource,
                                                   &aDestination, aCopySize);
  return IPC_OK();
}
345
// Replays a compute pass serialized by the child into the command encoder
// `aSelfId`, then releases the shmem holding the serialized pass.
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunComputePass(RawId aSelfId,
                                                              Shmem&& aShmem) {
  ffi::wgpu_server_encode_compute_pass(mContext, aSelfId, aShmem.get<uint8_t>(),
                                       aShmem.Size<uint8_t>());
  DeallocShmem(aShmem);
  return IPC_OK();
}

// Replays a render pass serialized by the child into the command encoder
// `aSelfId`, then releases the shmem holding the serialized pass.
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunRenderPass(RawId aSelfId,
                                                             Shmem&& aShmem) {
  ffi::wgpu_server_encode_render_pass(mContext, aSelfId, aShmem.get<uint8_t>(),
                                      aShmem.Size<uint8_t>());
  DeallocShmem(aShmem);
  return IPC_OK();
}
361
// Finalizes the command encoder `aSelfId` into a command buffer. `aDesc` is
// currently unused; the FFI descriptor stays default-initialized until its
// fields are plumbed through.
ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
    RawId aSelfId, const dom::GPUCommandBufferDescriptor& aDesc) {
  Unused << aDesc;
  ffi::WGPUCommandBufferDescriptor desc = {};
  ffi::wgpu_server_encoder_finish(mContext, aSelfId, &desc);
  return IPC_OK();
}
369
// Destroys the server-side command encoder identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aSelfId) {
  ffi::wgpu_server_encoder_destroy(mContext, aSelfId);
  return IPC_OK();
}

// Destroys the server-side command buffer identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aSelfId) {
  ffi::wgpu_server_command_buffer_destroy(mContext, aSelfId);
  return IPC_OK();
}
379
// Submits the given command buffers to queue `aSelfId`.
ipc::IPCResult WebGPUParent::RecvQueueSubmit(
    RawId aSelfId, const nsTArray<RawId>& aCommandBuffers) {
  ffi::wgpu_server_queue_submit(mContext, aSelfId, aCommandBuffers.Elements(),
                                aCommandBuffers.Length());
  return IPC_OK();
}
386
// Creates a bind group layout under `aNewId`. The FFI descriptor borrows
// `aDesc.mEntries`, which outlives the call.
ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroupLayout(
    RawId aSelfId, const SerialBindGroupLayoutDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUBindGroupLayoutDescriptor desc = {};
  desc.entries = aDesc.mEntries.Elements();
  desc.entries_length = aDesc.mEntries.Length();
  ffi::wgpu_server_device_create_bind_group_layout(mContext, aSelfId, &desc,
                                                   aNewId);
  return IPC_OK();
}

// Destroys the server-side bind group layout identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aSelfId) {
  ffi::wgpu_server_bind_group_layout_destroy(mContext, aSelfId);
  return IPC_OK();
}
401
// Creates a pipeline layout under `aNewId`. The FFI descriptor borrows
// `aDesc.mBindGroupLayouts`, which outlives the call.
ipc::IPCResult WebGPUParent::RecvDeviceCreatePipelineLayout(
    RawId aSelfId, const SerialPipelineLayoutDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUPipelineLayoutDescriptor desc = {};
  desc.bind_group_layouts = aDesc.mBindGroupLayouts.Elements();
  desc.bind_group_layouts_length = aDesc.mBindGroupLayouts.Length();
  ffi::wgpu_server_device_create_pipeline_layout(mContext, aSelfId, &desc,
                                                 aNewId);
  return IPC_OK();
}

// Destroys the server-side pipeline layout identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aSelfId) {
  ffi::wgpu_server_pipeline_layout_destroy(mContext, aSelfId);
  return IPC_OK();
}
416
RecvDeviceCreateBindGroup(RawId aSelfId,const SerialBindGroupDescriptor & aDesc,RawId aNewId)417 ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroup(
418 RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId) {
419 nsTArray<ffi::WGPUBindGroupEntry> ffiEntries(aDesc.mEntries.Length());
420 for (const auto& entry : aDesc.mEntries) {
421 ffi::WGPUBindGroupEntry bgb = {};
422 bgb.binding = entry.mBinding;
423 switch (entry.mType) {
424 case SerialBindGroupEntryType::Buffer:
425 bgb.resource.tag = ffi::WGPUBindingResource_Buffer;
426 bgb.resource.buffer = {entry.mValue, entry.mBufferOffset, entry.mBufferSize};
427 break;
428 case SerialBindGroupEntryType::Texture:
429 bgb.resource.tag = ffi::WGPUBindingResource_TextureView;
430 bgb.resource.texture_view = {entry.mValue};
431 break;
432 case SerialBindGroupEntryType::Sampler:
433 bgb.resource.tag = ffi::WGPUBindingResource_Sampler;
434 bgb.resource.sampler = {entry.mValue};
435 break;
436 default:
437 MOZ_CRASH("unreachable");
438 }
439 ffiEntries.AppendElement(bgb);
440 }
441 ffi::WGPUBindGroupDescriptor desc = {};
442 desc.layout = aDesc.mLayout;
443 desc.entries = ffiEntries.Elements();
444 desc.entries_length = ffiEntries.Length();
445 ffi::wgpu_server_device_create_bind_group(mContext, aSelfId, &desc, aNewId);
446 return IPC_OK();
447 }
448
// Destroys the server-side bind group identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
  ffi::wgpu_server_bind_group_destroy(mContext, aSelfId);
  return IPC_OK();
}
453
// Creates a shader module under `aNewId` from 32-bit code words. The FFI
// descriptor borrows `aData`, which outlives the call.
ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
    RawId aSelfId, const nsTArray<uint32_t>& aData, RawId aNewId) {
  ffi::WGPUShaderModuleDescriptor desc = {};
  desc.code.bytes = aData.Elements();
  desc.code.length = aData.Length();
  ffi::wgpu_server_device_create_shader_module(mContext, aSelfId, &desc,
                                               aNewId);
  return IPC_OK();
}

// Destroys the server-side shader module identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aSelfId) {
  ffi::wgpu_server_shader_module_destroy(mContext, aSelfId);
  return IPC_OK();
}
468
// Creates a compute pipeline under `aNewId`. The entry point is lossily
// converted to ASCII; the conversion object must outlive the FFI call
// because the descriptor stores a raw pointer into it.
ipc::IPCResult WebGPUParent::RecvDeviceCreateComputePipeline(
    RawId aSelfId, const SerialComputePipelineDescriptor& aDesc, RawId aNewId) {
  const NS_LossyConvertUTF16toASCII entryPoint(aDesc.mComputeStage.mEntryPoint);
  ffi::WGPUComputePipelineDescriptor desc = {};
  desc.layout = aDesc.mLayout;
  desc.compute_stage.module = aDesc.mComputeStage.mModule;
  desc.compute_stage.entry_point = entryPoint.get();
  ffi::wgpu_server_device_create_compute_pipeline(mContext, aSelfId, &desc,
                                                  aNewId);
  return IPC_OK();
}

// Destroys the server-side compute pipeline identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
  ffi::wgpu_server_compute_pipeline_destroy(mContext, aSelfId);
  return IPC_OK();
}
485
// Creates a render pipeline under `aNewId`, translating the serialized
// descriptor into the FFI form. Several FFI fields are raw pointers into
// locals and into `aDesc`; all of them outlive the create call below.
ipc::IPCResult WebGPUParent::RecvDeviceCreateRenderPipeline(
    RawId aSelfId, const SerialRenderPipelineDescriptor& aDesc, RawId aNewId) {
  // Lossy ASCII conversions must stay alive until the FFI call: the
  // descriptor stores raw `const char*` entry points into them.
  const NS_LossyConvertUTF16toASCII vsEntryPoint(
      aDesc.mVertexStage.mEntryPoint);
  const NS_LossyConvertUTF16toASCII fsEntryPoint(
      aDesc.mFragmentStage.mEntryPoint);
  nsTArray<ffi::WGPUVertexBufferLayoutDescriptor> vertexBuffers(
      aDesc.mVertexState.mVertexBuffers.Length());

  ffi::WGPURenderPipelineDescriptor desc = {};
  ffi::WGPUProgrammableStageDescriptor fragmentDesc = {};
  desc.layout = aDesc.mLayout;
  desc.vertex_stage.module = aDesc.mVertexStage.mModule;
  desc.vertex_stage.entry_point = vsEntryPoint.get();
  // A zero module ID is treated here as "no fragment stage".
  if (aDesc.mFragmentStage.mModule != 0) {
    fragmentDesc.module = aDesc.mFragmentStage.mModule;
    fragmentDesc.entry_point = fsEntryPoint.get();
    desc.fragment_stage = &fragmentDesc;
  }
  desc.primitive_topology = aDesc.mPrimitiveTopology;
  // Optional states are passed as pointers into `aDesc`.
  if (aDesc.mRasterizationState.isSome()) {
    desc.rasterization_state = aDesc.mRasterizationState.ptr();
  }
  desc.color_states = aDesc.mColorStates.Elements();
  desc.color_states_length = aDesc.mColorStates.Length();
  if (aDesc.mDepthStencilState.isSome()) {
    desc.depth_stencil_state = aDesc.mDepthStencilState.ptr();
  }
  for (const auto& vertexBuffer : aDesc.mVertexState.mVertexBuffers) {
    ffi::WGPUVertexBufferLayoutDescriptor vb = {};
    vb.array_stride = vertexBuffer.mArrayStride;
    vb.step_mode = vertexBuffer.mStepMode;
    // Attribute arrays are borrowed from `aDesc` as well.
    vb.attributes = vertexBuffer.mAttributes.Elements();
    vb.attributes_length = vertexBuffer.mAttributes.Length();
    vertexBuffers.AppendElement(vb);
  }
  desc.vertex_state.index_format = aDesc.mVertexState.mIndexFormat;
  desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
  desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
  desc.sample_count = aDesc.mSampleCount;
  desc.sample_mask = aDesc.mSampleMask;
  desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
  ffi::wgpu_server_device_create_render_pipeline(mContext, aSelfId, &desc,
                                                 aNewId);
  return IPC_OK();
}
532
// Destroys the server-side render pipeline identified by `aSelfId`.
ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
  ffi::wgpu_server_render_pipeline_destroy(mContext, aSelfId);
  return IPC_OK();
}
537
538 // TODO: proper destruction
539 static const uint64_t kBufferAlignment = 0x100;
540
Align(uint64_t value)541 static uint64_t Align(uint64_t value) {
542 return (value | (kBufferAlignment - 1)) + 1;
543 }
544
// Sets up a swap-chain presentation target: allocates the CPU-side texture
// host that WebRender reads from, registers it under `aExternalId`, and
// records the buffer IDs the child reserved for readback staging buffers.
ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
    RawId aSelfId, RawId aQueueId, const RGBDescriptor& aDesc,
    const nsTArray<RawId>& aBufferIds, ExternalImageId aExternalId) {
  const auto rows = aDesc.size().height;
  // GPU readback buffers need rows padded to the copy alignment; the
  // texture host uses the packed RGB stride instead.
  const auto bufferStride =
      Align(static_cast<uint64_t>(aDesc.size().width) * 4);
  const auto textureStride = layers::ImageDataSerializer::GetRGBStride(aDesc);
  const auto wholeBufferSize = CheckedInt<size_t>(textureStride) * rows;
  if (!wholeBufferSize.isValid()) {
    NS_ERROR("Invalid total buffer size!");
    return IPC_OK();
  }
  // Ownership of this allocation passes to the MemoryTextureHost below.
  auto textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
  if (!textureHostData) {
    NS_ERROR("Unable to allocate host data!");
    return IPC_OK();
  }
  auto textureHost = new layers::MemoryTextureHost(
      textureHostData, aDesc, layers::TextureFlags::NO_FLAGS);
  textureHost->CreateRenderTexture(aExternalId);
  nsTArray<RawId> bufferIds(aBufferIds.Clone());
  RefPtr<PresentationData> data = new PresentationData();
  data->mDeviceId = aSelfId;
  data->mQueueId = aQueueId;
  data->mTextureHost = textureHost;
  // NOTE(review): implicit uint64 -> uint32 narrowing; fine for realistic
  // canvas widths but an explicit checked cast would be safer.
  data->mSourcePitch = bufferStride;
  data->mTargetPitch = textureStride;
  data->mRowCount = rows;
  for (const RawId id : bufferIds) {
    data->mUnassignedBufferIds.push_back(id);
  }
  if (!mCanvasMap.insert({AsUint64(aExternalId), data}).second) {
    NS_ERROR("External image is already registered as WebGPU canvas!");
  }
  return IPC_OK();
}
581
// Completion callback for the buffer map started in RecvSwapChainPresent:
// copies the frame row by row from the mapped staging buffer into the
// texture host memory (the two pitches may differ), then recycles the
// staging buffer and drops the extra PresentationData reference.
static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
                            const uint8_t* ptr, uint8_t* userdata) {
  auto data = reinterpret_cast<PresentationData*>(userdata);
  if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
    uint8_t* dst = data->mTextureHost->GetBuffer();
    for (uint32_t row = 0; row < data->mRowCount; ++row) {
      memcpy(dst, ptr, data->mTargetPitch);
      dst += data->mTargetPitch;
      ptr += data->mSourcePitch;
    }
  } else {
    // TODO: better handle errors
    NS_WARNING("WebGPU frame mapping failed!");
  }
  // Move the buffer from the queued list back to the available list.
  // NOTE(review): this pops from the back while RecvSwapChainPresent
  // inserts at the front, which is FIFO only if map callbacks fire in
  // submission order — confirm.
  data->mBuffersLock.Lock();
  RawId bufferId = data->mQueuedBufferIds.back();
  data->mQueuedBufferIds.pop_back();
  data->mAvailableBufferIds.push_back(bufferId);
  data->mBuffersLock.Unlock();
  // We artificially did `AddRef` before calling `wgpu_server_buffer_map_read`.
  // Now we can let it go again.
  data->Release();
}
605
// Presents one frame of a swap chain: records and submits a GPU copy of
// the current texture into a readback staging buffer, then asynchronously
// maps that buffer. PresentCallback finishes the job by copying the mapped
// bytes into the texture host that WebRender samples.
ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
    wr::ExternalImageId aExternalId, RawId aTextureId,
    RawId aCommandEncoderId) {
  // step 0: get the data associated with the swapchain
  const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
  if (lookup == mCanvasMap.end()) {
    NS_WARNING("WebGPU presenting on a destroyed swap chain!");
    return IPC_OK();
  }
  RefPtr<PresentationData> data = lookup->second.get();
  RawId bufferId = 0;
  const auto& size = data->mTextureHost->GetSize();
  const auto bufferSize = data->mRowCount * data->mSourcePitch;

  // step 1: find an available staging buffer, or create one
  data->mBuffersLock.Lock();
  if (!data->mAvailableBufferIds.empty()) {
    // Reuse a buffer from a previous frame; it is still mapped from the
    // last PresentCallback, so unmap it before the GPU writes to it again.
    bufferId = data->mAvailableBufferIds.back();
    wgpu_server_buffer_unmap(mContext, bufferId);
    data->mAvailableBufferIds.pop_back();
  } else if (!data->mUnassignedBufferIds.empty()) {
    // First use of a child-reserved ID: create the server-side buffer.
    bufferId = data->mUnassignedBufferIds.back();
    data->mUnassignedBufferIds.pop_back();

    ffi::WGPUBufferUsage usage =
        WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ;
    ffi::WGPUBufferDescriptor desc = {};
    desc.size = bufferSize;
    desc.usage = usage;
    ffi::wgpu_server_device_create_buffer(mContext, data->mDeviceId, &desc,
                                          bufferId);
  } else {
    // Every buffer is in flight; this frame gets dropped below.
    bufferId = 0;
  }
  if (bufferId) {
    // Insert at the front: PresentCallback pops from the back, recycling
    // the oldest queued buffer first.
    data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
  }
  data->mBuffersLock.Unlock();
  if (!bufferId) {
    // TODO: add a warning - no buffer are available!
    return IPC_OK();
  }

  // step 2: submit a copy command for the frame
  ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
  ffi::wgpu_server_device_create_encoder(mContext, data->mDeviceId,
                                         &encoderDesc, aCommandEncoderId);
  const ffi::WGPUTextureCopyView texView = {
      aTextureId,
  };
  const ffi::WGPUBufferCopyView bufView = {
      bufferId,
      0,
      data->mSourcePitch,
      0,
  };
  const ffi::WGPUExtent3d extent = {
      static_cast<uint32_t>(size.width),
      static_cast<uint32_t>(size.height),
      1,
  };
  ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aCommandEncoderId,
                                                  &texView, &bufView, extent);
  ffi::WGPUCommandBufferDescriptor commandDesc = {};
  ffi::wgpu_server_encoder_finish(mContext, aCommandEncoderId, &commandDesc);
  ffi::wgpu_server_queue_submit(mContext, data->mQueueId, &aCommandEncoderId,
                                1);

  // step 3: request the pixels to be copied into the external texture
  // TODO: this isn't strictly necessary. When WR wants to Lock() the external
  // texture,
  // we can just give it the contents of the last mapped buffer instead of the
  // copy.
  // This `AddRef` is needed for passing `data` as a raw pointer to
  // `wgpu_server_buffer_map_read` to serve as `userdata`. It's released at
  // the end of `PresentCallback` body.
  const auto userData = do_AddRef(data).take();
  ffi::wgpu_server_buffer_map_read(mContext, bufferId, 0, bufferSize,
                                   &PresentCallback,
                                   reinterpret_cast<uint8_t*>(userData));

  return IPC_OK();
}
689
// Tears down the swap chain registered under `aExternalId`: unregisters the
// render texture, returns unused IDs to the child, and destroys the staging
// buffers that were created server-side.
ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
    wr::ExternalImageId aExternalId) {
  const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
  MOZ_ASSERT(lookup != mCanvasMap.end());
  RefPtr<PresentationData> data = lookup->second.get();
  mCanvasMap.erase(AsUint64(aExternalId));
  data->mTextureHost = nullptr;
  layers::TextureHost::DestroyRenderTexture(aExternalId);

  data->mBuffersLock.Lock();
  // IDs that never had a server-side buffer created go straight back to the
  // child for reuse.
  for (const auto bid : data->mUnassignedBufferIds) {
    if (!SendFreeBuffer(bid)) {
      NS_WARNING("Unable to free an ID for non-assigned buffer");
    }
  }
  // Created buffers are destroyed server-side. NOTE(review): their IDs are
  // not sent back via SendFreeBuffer here — confirm whether the recycler
  // callbacks cover that path.
  for (const auto bid : data->mAvailableBufferIds) {
    ffi::wgpu_server_buffer_destroy(mContext, bid);
  }
  for (const auto bid : data->mQueuedBufferIds) {
    ffi::wgpu_server_buffer_destroy(mContext, bid);
  }
  data->mBuffersLock.Unlock();
  return IPC_OK();
}
714
// Final teardown of the protocol: stops the maintenance timer, destroys all
// registered render textures, flushes outstanding device work, and deletes
// the wgpu context.
ipc::IPCResult WebGPUParent::RecvShutdown() {
  mTimer.Stop();
  for (const auto& p : mCanvasMap) {
    const wr::ExternalImageId extId = {p.first};
    layers::TextureHost::DestroyRenderTexture(extId);
  }
  mCanvasMap.clear();
  // `true` presumably makes the poll block until queued work completes
  // (contrast with MaintainDevices) — confirm against the FFI header.
  ffi::wgpu_server_poll_all_devices(mContext, true);
  ffi::wgpu_server_delete(const_cast<ffi::WGPUGlobal*>(mContext));
  // NOTE(review): mContext now dangles; any FFI use after this message
  // would be a use-after-free — confirm no later messages can arrive.
  return IPC_OK();
}
726
727 } // namespace webgpu
728 } // namespace mozilla
729