1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "SharedBufferMLGPU.h"
8 #include "BufferCache.h"
9 #include "MLGDevice.h"
10 
11 using namespace std;
12 
13 namespace mozilla {
14 namespace layers {
15 
// Construct a shared buffer that sub-allocates GPU buffer space for a frame.
// aDevice: device used to create/map buffers; aType: vertex or constant;
// aDefaultSize: baseline allocation size used by Init() and Reset() heuristics.
// No GPU buffer is created here - see Init().
SharedBufferMLGPU::SharedBufferMLGPU(MLGDevice* aDevice, MLGBufferType aType,
                                     size_t aDefaultSize)
    : mDevice(aDevice),
      mType(aType),
      mDefaultSize(aDefaultSize),
      // Assume offset binding works; SharedConstantBuffer may clear this
      // based on device capabilities.
      mCanUseOffsetAllocation(true),
      mCurrentPosition(0),
      mMaxSize(0),
      mMapped(false),
      mBytesUsedThisFrame(0),
      mNumSmallFrames(0) {
  MOZ_COUNT_CTOR(SharedBufferMLGPU);
}
29 
SharedBufferMLGPU::~SharedBufferMLGPU() {
  MOZ_COUNT_DTOR(SharedBufferMLGPU);
  // Release any outstanding mapping before the buffer is destroyed.
  Unmap();
}
34 
Init()35 bool SharedBufferMLGPU::Init() {
36   // If we can't use buffer offset binding, we never allocated shared buffers.
37   if (!mCanUseOffsetAllocation) {
38     return true;
39   }
40 
41   // If we can use offset binding, allocate an initial shared buffer now.
42   if (!GrowBuffer(mDefaultSize)) {
43     return false;
44   }
45   return true;
46 }
47 
Reset()48 void SharedBufferMLGPU::Reset() {
49   // We shouldn't be mapped here, but just in case, unmap now.
50   Unmap();
51   mBytesUsedThisFrame = 0;
52 
53   // If we allocated a large buffer for a particularly heavy layer tree,
54   // but have not used most of the buffer again for many frames, we
55   // discard the buffer. This is to prevent having to perform large
56   // pointless uploads after visiting a single havy page - it also
57   // lessens ping-ponging between large and small buffers.
58   if (mBuffer && (mBuffer->GetSize() > mDefaultSize * 4) &&
59       mNumSmallFrames >= 10) {
60     mBuffer = nullptr;
61   }
62 
63   // Note that we do not aggressively map a new buffer. There's no reason to,
64   // and it'd cause unnecessary uploads when painting empty frames.
65 }
66 
EnsureMappedBuffer(size_t aBytes)67 bool SharedBufferMLGPU::EnsureMappedBuffer(size_t aBytes) {
68   if (!mBuffer || (mMaxSize - mCurrentPosition < aBytes)) {
69     if (!GrowBuffer(aBytes)) {
70       return false;
71     }
72   }
73   if (!mMapped && !Map()) {
74     return false;
75   }
76   return true;
77 }
78 
// We don't want to cache large buffers, since it results in larger uploads
// that might not be needed. Caps the doubling growth in GrowBuffer at
// 128 KiB.
static const size_t kMaxCachedBufferSize = 128 * 1024;
82 
GrowBuffer(size_t aBytes)83 bool SharedBufferMLGPU::GrowBuffer(size_t aBytes) {
84   // We only pre-allocate buffers if we can use offset allocation.
85   MOZ_ASSERT(mCanUseOffsetAllocation);
86 
87   // Unmap the previous buffer. This will retain mBuffer, but free up the
88   // address space used by its mapping.
89   Unmap();
90 
91   size_t maybeSize = mDefaultSize;
92   if (mBuffer) {
93     // Try to first grow the previous allocation size.
94     maybeSize = std::min(kMaxCachedBufferSize, mBuffer->GetSize() * 2);
95   }
96 
97   size_t bytes = std::max(aBytes, maybeSize);
98   mBuffer = mDevice->CreateBuffer(mType, bytes, MLGUsage::Dynamic);
99   if (!mBuffer) {
100     return false;
101   }
102 
103   mCurrentPosition = 0;
104   mMaxSize = mBuffer->GetSize();
105   return true;
106 }
107 
PrepareForUsage()108 void SharedBufferMLGPU::PrepareForUsage() {
109   Unmap();
110 
111   if (mBytesUsedThisFrame <= mDefaultSize) {
112     mNumSmallFrames++;
113   } else {
114     mNumSmallFrames = 0;
115   }
116 }
117 
Map()118 bool SharedBufferMLGPU::Map() {
119   MOZ_ASSERT(mBuffer);
120   MOZ_ASSERT(!mMapped);
121 
122   if (!mDevice->Map(mBuffer, MLGMapType::WRITE_DISCARD, &mMap)) {
123     // Don't retain the buffer, it's useless if we can't map it.
124     mBuffer = nullptr;
125     return false;
126   }
127 
128   mCurrentPosition = 0;
129   mMapped = true;
130   return true;
131 }
132 
Unmap()133 void SharedBufferMLGPU::Unmap() {
134   if (!mMapped) {
135     return;
136   }
137 
138   mBytesUsedThisFrame += mCurrentPosition;
139 
140   mDevice->Unmap(mBuffer);
141   mMap = MLGMappedResource();
142   mMapped = false;
143 }
144 
GetBufferPointer(size_t aBytes,ptrdiff_t * aOutOffset,RefPtr<MLGBuffer> * aOutBuffer)145 uint8_t* SharedBufferMLGPU::GetBufferPointer(size_t aBytes,
146                                              ptrdiff_t* aOutOffset,
147                                              RefPtr<MLGBuffer>* aOutBuffer) {
148   if (!EnsureMappedBuffer(aBytes)) {
149     return nullptr;
150   }
151 
152   ptrdiff_t newPos = mCurrentPosition + aBytes;
153   MOZ_ASSERT(size_t(newPos) <= mMaxSize);
154 
155   *aOutOffset = mCurrentPosition;
156   *aOutBuffer = mBuffer;
157 
158   uint8_t* ptr = reinterpret_cast<uint8_t*>(mMap.mData) + mCurrentPosition;
159   mCurrentPosition = newPos;
160   return ptr;
161 }
162 
// Default-construct an unallocated section; mOffset of -1 marks "no data".
VertexBufferSection::VertexBufferSection()
    : mOffset(-1), mNumVertices(0), mStride(0) {}
165 
Init(MLGBuffer * aBuffer,ptrdiff_t aOffset,size_t aNumVertices,size_t aStride)166 void VertexBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
167                                size_t aNumVertices, size_t aStride) {
168   mBuffer = aBuffer;
169   mOffset = aOffset;
170   mNumVertices = aNumVertices;
171   mStride = aStride;
172 }
173 
// Default-construct an unallocated section; mOffset of -1 marks "no data".
ConstantBufferSection::ConstantBufferSection() : mOffset(-1) {}
175 
Init(MLGBuffer * aBuffer,ptrdiff_t aOffset,size_t aBytes,size_t aNumItems)176 void ConstantBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
177                                  size_t aBytes, size_t aNumItems) {
178   mBuffer = aBuffer;
179   mOffset = aOffset;
180   mNumBytes = aBytes;
181   mNumItems = aNumItems;
182 }
183 
// Shared buffer specialized for vertex data; simply forwards to the base
// with MLGBufferType::Vertex.
SharedVertexBuffer::SharedVertexBuffer(MLGDevice* aDevice, size_t aDefaultSize)
    : SharedBufferMLGPU(aDevice, MLGBufferType::Vertex, aDefaultSize) {}
186 
Allocate(VertexBufferSection * aHolder,size_t aNumItems,size_t aSizeOfItem,const void * aData)187 bool SharedVertexBuffer::Allocate(VertexBufferSection* aHolder,
188                                   size_t aNumItems, size_t aSizeOfItem,
189                                   const void* aData) {
190   RefPtr<MLGBuffer> buffer;
191   ptrdiff_t offset;
192   size_t bytes = aSizeOfItem * aNumItems;
193   uint8_t* ptr = GetBufferPointer(bytes, &offset, &buffer);
194   if (!ptr) {
195     return false;
196   }
197 
198   memcpy(ptr, aData, bytes);
199   aHolder->Init(buffer, offset, aNumItems, aSizeOfItem);
200   return true;
201 }
202 
// Start with no mapped pointer; Init() fills in the mapping details.
AutoBufferUploadBase::AutoBufferUploadBase() : mPtr(nullptr) {}
204 
// RAII: automatically unmap the buffer (if one was attached via Init) when
// the upload helper goes out of scope.
AutoBufferUploadBase::~AutoBufferUploadBase() {
  if (mBuffer) {
    UnmapBuffer();
  }
}
210 
Init(void * aPtr,MLGDevice * aDevice,MLGBuffer * aBuffer)211 void AutoBufferUploadBase::Init(void* aPtr, MLGDevice* aDevice,
212                                 MLGBuffer* aBuffer) {
213   MOZ_ASSERT(!mPtr && aPtr);
214   mPtr = aPtr;
215   mDevice = aDevice;
216   mBuffer = aBuffer;
217 }
218 
// Shared buffer specialized for constant data. Queries the device for the
// maximum bindable constant buffer size and whether constant buffers can be
// bound at an offset (which gates the base class's shared-allocation path).
SharedConstantBuffer::SharedConstantBuffer(MLGDevice* aDevice,
                                           size_t aDefaultSize)
    : SharedBufferMLGPU(aDevice, MLGBufferType::Constant, aDefaultSize) {
  mMaxConstantBufferBindSize = aDevice->GetMaxConstantBufferBindSize();
  mCanUseOffsetAllocation = aDevice->CanUseConstantBufferOffsetBinding();
}
225 
Allocate(ConstantBufferSection * aHolder,AutoBufferUploadBase * aPtr,size_t aNumItems,size_t aSizeOfItem)226 bool SharedConstantBuffer::Allocate(ConstantBufferSection* aHolder,
227                                     AutoBufferUploadBase* aPtr,
228                                     size_t aNumItems, size_t aSizeOfItem) {
229   MOZ_ASSERT(aSizeOfItem % 16 == 0, "Items must be padded to 16 bytes");
230 
231   size_t bytes = aNumItems * aSizeOfItem;
232   if (bytes > mMaxConstantBufferBindSize) {
233     gfxWarning()
234         << "Attempted to allocate too many bytes into a constant buffer";
235     return false;
236   }
237 
238   RefPtr<MLGBuffer> buffer;
239   ptrdiff_t offset;
240   if (!GetBufferPointer(aPtr, bytes, &offset, &buffer)) {
241     return false;
242   }
243 
244   aHolder->Init(buffer, offset, bytes, aNumItems);
245   return true;
246 }
247 
AllocateNewBuffer(size_t aBytes,ptrdiff_t * aOutOffset,RefPtr<MLGBuffer> * aOutBuffer)248 uint8_t* SharedConstantBuffer::AllocateNewBuffer(
249     size_t aBytes, ptrdiff_t* aOutOffset, RefPtr<MLGBuffer>* aOutBuffer) {
250   RefPtr<MLGBuffer> buffer;
251   if (BufferCache* cache = mDevice->GetConstantBufferCache()) {
252     buffer = cache->GetOrCreateBuffer(aBytes);
253   } else {
254     buffer = mDevice->CreateBuffer(MLGBufferType::Constant, aBytes,
255                                    MLGUsage::Dynamic);
256   }
257   if (!buffer) {
258     return nullptr;
259   }
260 
261   MLGMappedResource map;
262   if (!mDevice->Map(buffer, MLGMapType::WRITE_DISCARD, &map)) {
263     return nullptr;
264   }
265 
266   // Signal that offsetting is not supported.
267   *aOutOffset = -1;
268   *aOutBuffer = buffer;
269   return reinterpret_cast<uint8_t*>(map.mData);
270 }
271 
// Unmap the buffer attached via Init(); called from the destructor.
void AutoBufferUploadBase::UnmapBuffer() { mDevice->Unmap(mBuffer); }
273 
274 }  // namespace layers
275 }  // namespace mozilla
276