// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/null/DeviceNull.h"

#include "dawn_native/BackendConnection.h"
#include "dawn_native/Commands.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/Instance.h"
#include "dawn_native/Surface.h"

#include <spirv_cross.hpp>

namespace dawn_native { namespace null {

    // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()

    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
        mPCIInfo.name = "Null backend";
        mAdapterType = wgpu::AdapterType::CPU;

        // Enable all extensions by default for the convenience of tests.
        mSupportedExtensions.extensionsBitSet.flip();
    }

    Adapter::~Adapter() = default;

    // Used by tests that need an adapter without all extensions enabled.
    void Adapter::SetSupportedExtensions(const std::vector<const char*>& requiredExtensions) {
        mSupportedExtensions = GetInstance()->ExtensionNamesToExtensionsSet(requiredExtensions);
    }

    ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
        return Device::Create(this, descriptor);
    }

    class Backend : public BackendConnection {
      public:
        Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
        }

        std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
            // There is always a single Null adapter because it is purely CPU based and doesn't
            // depend on the system.
            std::vector<std::unique_ptr<AdapterBase>> adapters;
            adapters.push_back(std::make_unique<Adapter>(GetInstance()));
            return adapters;
        }
    };

    BackendConnection* Connect(InstanceBase* instance) {
        return new Backend(instance);
    }

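    // A buffer-to-buffer copy recorded by Device::CopyFromStagingToBuffer() and deferred until
    // the next submit, when it is executed against the destination buffer's backing memory.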
    struct CopyFromStagingToBufferOperation : PendingOperation {
        void Execute() override {
            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
        }

        StagingBufferBase* staging;
        Ref<Buffer> destination;
        uint64_t sourceOffset;
        uint64_t destinationOffset;
        uint64_t size;
    };

    // Device

    // static
    ResultOrError<Device*> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
        DAWN_TRY(device->Initialize());
        return device.Detach();
    }

    Device::~Device() {
        ShutDownBase();
    }

    MaybeError Device::Initialize() {
        return DeviceBase::Initialize(new Queue(this));
    }

    ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
        const BindGroupDescriptor* descriptor) {
        return new BindGroup(this, descriptor);
    }
    ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
        const BindGroupLayoutDescriptor* descriptor) {
        return new BindGroupLayout(this, descriptor);
    }
    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
        return AcquireRef(new Buffer(this, descriptor));
    }
    CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
                                                   const CommandBufferDescriptor* descriptor) {
        return new CommandBuffer(encoder, descriptor);
    }
    ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
        const ComputePipelineDescriptor* descriptor) {
        return new ComputePipeline(this, descriptor);
    }
    ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
        const PipelineLayoutDescriptor* descriptor) {
        return new PipelineLayout(this, descriptor);
    }
    ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
        return new QuerySet(this, descriptor);
    }
    ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
        const RenderPipelineDescriptor* descriptor) {
        return new RenderPipeline(this, descriptor);
    }
    ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
        return new Sampler(this, descriptor);
    }
    ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
        const ShaderModuleDescriptor* descriptor) {
        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
        DAWN_TRY(module->Initialize());
        return module.Detach();
    }
    ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
        const SwapChainDescriptor* descriptor) {
        return new OldSwapChain(this, descriptor);
    }
    ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
        Surface* surface,
        NewSwapChainBase* previousSwapChain,
        const SwapChainDescriptor* descriptor) {
        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
    }
    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
    }
    ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) {
        return new TextureView(texture, descriptor);
    }

    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
        std::unique_ptr<StagingBufferBase> stagingBuffer =
            std::make_unique<StagingBuffer>(size, this);
        DAWN_TRY(stagingBuffer->Initialize());
        return std::move(stagingBuffer);
    }

    void Device::ShutDownImpl() {
        ASSERT(GetState() == State::Disconnected);

        // Clear pending operations before checking mMemoryUsage because some operations keep a
        // reference to Buffers.
        mPendingOperations.clear();
        ASSERT(mMemoryUsage == 0);
    }

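    // For destruction, pending operations are dropped without being executed.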
    MaybeError Device::WaitForIdleForDestruction() {
        mPendingOperations.clear();
        return {};
    }

    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
                                               uint64_t sourceOffset,
                                               BufferBase* destination,
                                               uint64_t destinationOffset,
                                               uint64_t size) {
        if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
            destination->SetIsDataInitialized();
        }

        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
        operation->staging = source;
        operation->destination = ToBackend(destination);
        operation->sourceOffset = sourceOffset;
        operation->destinationOffset = destinationOffset;
        operation->size = size;

        AddPendingOperation(std::move(operation));

        return {};
    }

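    // The null backend does not store texture data, so buffer-to-texture copies are no-ops.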
    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                const TextureDataLayout& src,
                                                TextureCopy* dst,
                                                const Extent3D& copySizePixels) {
        return {};
    }

    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
        static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
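        // Checking |bytes| against the limit first means the addition below cannot overflow,
        // since both operands are then known to be at most kMaxMemoryUsage.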
        if (bytes > kMaxMemoryUsage || mMemoryUsage + bytes > kMaxMemoryUsage) {
            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
        }
        mMemoryUsage += bytes;
        return {};
    }

    void Device::DecrementMemoryUsage(uint64_t bytes) {
        ASSERT(mMemoryUsage >= bytes);
        mMemoryUsage -= bytes;
    }

    MaybeError Device::TickImpl() {
        SubmitPendingOperations();
        return {};
    }

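    // The null backend has no asynchronous work: everything that has been submitted is already
    // complete, so the completed serial always matches the last submitted one.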
    ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
        return GetLastSubmittedCommandSerial();
    }

    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
        mPendingOperations.emplace_back(std::move(operation));
    }
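    // Executes all deferred operations in recording order, then advances the device's serials.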
    void Device::SubmitPendingOperations() {
        for (auto& operation : mPendingOperations) {
            operation->Execute();
        }
        mPendingOperations.clear();

        CheckPassedSerials();
        IncrementLastSubmittedCommandSerial();
    }

    // BindGroupDataHolder

    BindGroupDataHolder::BindGroupDataHolder(size_t size)
        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
                                                // pointer aligned enough for the allocation
    {
    }

    BindGroupDataHolder::~BindGroupDataHolder() {
        free(mBindingDataAllocation);
    }

    // BindGroup

    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
          BindGroupBase(device, descriptor, mBindingDataAllocation) {
    }

    // Buffer

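    // Buffer contents are backed by plain heap memory. The device-level memory accounting is
    // incremented in Device::CreateBufferImpl and decremented in the destructor below.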
    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
        : BufferBase(device, descriptor) {
        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
    }

    Buffer::~Buffer() {
        DestroyInternal();
        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
    }

    bool Buffer::IsCPUWritableAtCreation() const {
        // Only return true for mappable buffers so we can test cases that need / don't need a
        // staging buffer.
        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
    }

    MaybeError Buffer::MapAtCreationImpl() {
        return {};
    }

    void Buffer::CopyFromStaging(StagingBufferBase* staging,
                                 uint64_t sourceOffset,
                                 uint64_t destinationOffset,
                                 uint64_t size) {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
    }

    void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
        ASSERT(bufferOffset + size <= GetSize());
        ASSERT(mBackingData);
        memcpy(mBackingData.get() + bufferOffset, data, size);
    }

    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
        return {};
    }

    void* Buffer::GetMappedPointerImpl() {
        return mBackingData.get();
    }

    void Buffer::UnmapImpl() {
    }

    void Buffer::DestroyImpl() {
    }

    // CommandBuffer

    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
        : CommandBufferBase(encoder, descriptor) {
    }

    // QuerySet

    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
        : QuerySetBase(device, descriptor) {
    }

    QuerySet::~QuerySet() {
        DestroyInternal();
    }

    void QuerySet::DestroyImpl() {
    }

    // Queue

    Queue::Queue(Device* device) : QueueBase(device) {
    }

    Queue::~Queue() {
    }

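    // Command buffers are not executed by the null backend; submitting only flushes the deferred
    // staging-to-buffer copies.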
    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
        ToBackend(GetDevice())->SubmitPendingOperations();
        return {};
    }

    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
        return {};
    }

    // SwapChain

    // static
    ResultOrError<SwapChain*> SwapChain::Create(Device* device,
                                                Surface* surface,
                                                NewSwapChainBase* previousSwapChain,
                                                const SwapChainDescriptor* descriptor) {
        std::unique_ptr<SwapChain> swapchain =
            std::make_unique<SwapChain>(device, surface, descriptor);
        DAWN_TRY(swapchain->Initialize(previousSwapChain));
        return swapchain.release();
    }

    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
        if (previousSwapChain != nullptr) {
            // TODO(cwallez@chromium.org): figure out what should happen when surfaces are used by
            // multiple backends one after the other. It probably needs to block until the backend
            // and GPU are completely finished with the previous swapchain.
            if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
                return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
            }
        }

        return {};
    }

    SwapChain::~SwapChain() = default;

    MaybeError SwapChain::PresentImpl() {
        mTexture->Destroy();
        mTexture = nullptr;
        return {};
    }

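    // A fresh texture backs each frame; it lives until the next Present() or until the swapchain
    // is detached from its surface.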
    ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
        mTexture = AcquireRef(
            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
        return mTexture->CreateView(nullptr);
    }

    void SwapChain::DetachFromSurfaceImpl() {
        if (mTexture.Get() != nullptr) {
            mTexture->Destroy();
            mTexture = nullptr;
        }
    }

    // ShaderModule

    MaybeError ShaderModule::Initialize() {
        return InitializeBase();
    }

    // OldSwapChain

    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
        : OldSwapChainBase(device, descriptor) {
        const auto& im = GetImplementation();
        im.Init(im.userData, nullptr);
    }

    OldSwapChain::~OldSwapChain() {
    }

    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
        return GetDevice()->CreateTexture(descriptor);
    }

    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
        return {};
    }

    // NativeSwapChainImpl

    void NativeSwapChainImpl::Init(WSIContext* context) {
    }

    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
                                                      WGPUTextureUsage,
                                                      uint32_t width,
                                                      uint32_t height) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::Present() {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
        return wgpu::TextureFormat::RGBA8Unorm;
    }

    // StagingBuffer

    StagingBuffer::StagingBuffer(size_t size, Device* device)
        : StagingBufferBase(size), mDevice(device) {
    }

    StagingBuffer::~StagingBuffer() {
        if (mBuffer) {
            mDevice->DecrementMemoryUsage(GetSize());
        }
    }

    MaybeError StagingBuffer::Initialize() {
        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
        mBuffer = std::make_unique<uint8_t[]>(GetSize());
        mMappedPointer = mBuffer.get();
        return {};
    }

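    // The null backend imposes no alignment constraints on buffer-texture copies, so the minimum
    // legal values are returned.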
    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
        return 1;
    }

    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
        return 1;
    }

}}  // namespace dawn_native::null