1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "IpcResourceUpdateQueue.h"
8 #include <string.h>
9 #include <algorithm>
10 #include "mozilla/Maybe.h"
11 #include "mozilla/ipc/SharedMemory.h"
12 #include "mozilla/layers/PTextureChild.h"
13 #include "mozilla/layers/WebRenderBridgeChild.h"
14
15 namespace mozilla {
16 namespace wr {
17
18 using namespace mozilla::layers;
19
// Writer that copies resource payloads into shared memory: small payloads go
// into a series of fixed-size refcounted chunks, large ones into dedicated
// segments (see Write()). All shmem is allocated through aAllocator.
ShmSegmentsWriter::ShmSegmentsWriter(layers::WebRenderBridgeChild* aAllocator,
                                     size_t aChunkSize)
    : mShmAllocator(aAllocator), mCursor(0), mChunkSize(aChunkSize) {
  MOZ_ASSERT(mShmAllocator);
}
25
~ShmSegmentsWriter()26 ShmSegmentsWriter::~ShmSegmentsWriter() { Clear(); }
27
// Move constructor: steals aOther's allocations and allocator pointer, then
// zeroes aOther's cursor so the moved-from writer reports IsEmpty().
ShmSegmentsWriter::ShmSegmentsWriter(ShmSegmentsWriter&& aOther) noexcept
    : mSmallAllocs(std::move(aOther.mSmallAllocs)),
      mLargeAllocs(std::move(aOther.mLargeAllocs)),
      mShmAllocator(aOther.mShmAllocator),
      mCursor(aOther.mCursor),
      mChunkSize(aOther.mChunkSize) {
  aOther.mCursor = 0;
}
36
operator =(ShmSegmentsWriter && aOther)37 ShmSegmentsWriter& ShmSegmentsWriter::operator=(
38 ShmSegmentsWriter&& aOther) noexcept {
39 MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
40 Clear();
41 mSmallAllocs = std::move(aOther.mSmallAllocs);
42 mLargeAllocs = std::move(aOther.mLargeAllocs);
43 mShmAllocator = aOther.mShmAllocator;
44 mCursor = aOther.mCursor;
45 mChunkSize = aOther.mChunkSize;
46 aOther.mCursor = 0;
47 return *this;
48 }
49
Write(Range<uint8_t> aBytes)50 layers::OffsetRange ShmSegmentsWriter::Write(Range<uint8_t> aBytes) {
51 const size_t start = mCursor;
52 const size_t length = aBytes.length();
53
54 if (length >= mChunkSize * 4) {
55 auto range = AllocLargeChunk(length);
56 if (range.length()) {
57 // Allocation was successful
58 uint8_t* dstPtr = mLargeAllocs.LastElement().get<uint8_t>();
59 memcpy(dstPtr, aBytes.begin().get(), length);
60 }
61 return range;
62 }
63
64 int remainingBytesToCopy = length;
65
66 size_t srcCursor = 0;
67 size_t dstCursor = mCursor;
68 size_t currAllocLen = mSmallAllocs.Length();
69
70 while (remainingBytesToCopy > 0) {
71 if (dstCursor >= mSmallAllocs.Length() * mChunkSize) {
72 if (!AllocChunk()) {
73 // Allocation failed, so roll back to the state at the start of this
74 // Write() call and abort.
75 while (mSmallAllocs.Length() > currAllocLen) {
76 RefCountedShmem shm = mSmallAllocs.PopLastElement();
77 RefCountedShm::Dealloc(mShmAllocator, shm);
78 }
79 MOZ_ASSERT(mSmallAllocs.Length() == currAllocLen);
80 return layers::OffsetRange(0, start, 0);
81 }
82 // Allocation succeeded, so dstCursor should now be pointing to
83 // something inside the allocation buffer
84 MOZ_ASSERT(dstCursor < (mSmallAllocs.Length() * mChunkSize));
85 }
86
87 const size_t dstMaxOffset = mChunkSize * mSmallAllocs.Length();
88 const size_t dstBaseOffset = mChunkSize * (mSmallAllocs.Length() - 1);
89
90 MOZ_ASSERT(dstCursor >= dstBaseOffset);
91 MOZ_ASSERT(dstCursor <= dstMaxOffset);
92
93 size_t availableRange = dstMaxOffset - dstCursor;
94 size_t copyRange = std::min<int>(availableRange, remainingBytesToCopy);
95
96 uint8_t* srcPtr = &aBytes[srcCursor];
97 uint8_t* dstPtr = RefCountedShm::GetBytes(mSmallAllocs.LastElement()) +
98 (dstCursor - dstBaseOffset);
99
100 memcpy(dstPtr, srcPtr, copyRange);
101
102 srcCursor += copyRange;
103 dstCursor += copyRange;
104 remainingBytesToCopy -= copyRange;
105
106 // sanity check
107 MOZ_ASSERT(remainingBytesToCopy >= 0);
108 }
109
110 mCursor += length;
111
112 return layers::OffsetRange(0, start, length);
113 }
114
// Appends one refcounted shmem chunk of mChunkSize bytes to mSmallAllocs.
// Returns false (leaving state untouched) if the allocation fails.
bool ShmSegmentsWriter::AllocChunk() {
  RefCountedShmem shm;
  if (!mShmAllocator->AllocResourceShmem(mChunkSize, shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate chunk #"
                    << mSmallAllocs.Length();
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate chunk");
    return false;
  }
  // Take a reference on behalf of this writer; it is dropped in Clear() or
  // via IpcResourceUpdateQueue::ReleaseShmems().
  RefCountedShm::AddRef(shm);
  mSmallAllocs.AppendElement(shm);
  return true;
}
127
// Allocates a dedicated shmem segment for a payload too large for the small
// chunks. The returned OffsetRange's source() is the 1-based index into
// mLargeAllocs (0 is reserved for the small chunks); a zero-length range
// signals allocation failure.
layers::OffsetRange ShmSegmentsWriter::AllocLargeChunk(size_t aSize) {
  ipc::Shmem shm;
  auto shmType = ipc::SharedMemory::SharedMemoryType::TYPE_BASIC;
  if (!mShmAllocator->AllocShmem(aSize, shmType, &shm)) {
    gfxCriticalNote
        << "ShmSegmentsWriter failed to allocate large chunk of size " << aSize;
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate large chunk");
    return layers::OffsetRange(0, 0, 0);
  }
  mLargeAllocs.AppendElement(shm);

  return layers::OffsetRange(mLargeAllocs.Length(), 0, aSize);
}
141
// Transfers ownership of all accumulated segments into the (asserted-empty)
// out-params and resets the cursor so the writer can be reused.
void ShmSegmentsWriter::Flush(nsTArray<RefCountedShmem>& aSmallAllocs,
                              nsTArray<ipc::Shmem>& aLargeAllocs) {
  MOZ_ASSERT(aSmallAllocs.IsEmpty());
  MOZ_ASSERT(aLargeAllocs.IsEmpty());
  aSmallAllocs = std::move(mSmallAllocs);
  aLargeAllocs = std::move(mLargeAllocs);
  mCursor = 0;
}
150
IsEmpty() const151 bool ShmSegmentsWriter::IsEmpty() const { return mCursor == 0; }
152
// Releases all owned shmem segments (small and large) and resets the cursor.
void ShmSegmentsWriter::Clear() {
  if (mShmAllocator) {
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mSmallAllocs);
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mLargeAllocs);
  }
  mCursor = 0;
}
160
ShmSegmentsReader(const nsTArray<RefCountedShmem> & aSmallShmems,const nsTArray<ipc::Shmem> & aLargeShmems)161 ShmSegmentsReader::ShmSegmentsReader(
162 const nsTArray<RefCountedShmem>& aSmallShmems,
163 const nsTArray<ipc::Shmem>& aLargeShmems)
164 : mSmallAllocs(aSmallShmems), mLargeAllocs(aLargeShmems), mChunkSize(0) {
165 if (mSmallAllocs.IsEmpty()) {
166 return;
167 }
168
169 mChunkSize = RefCountedShm::GetSize(mSmallAllocs[0]);
170
171 // Check that all shmems are readable and have the same size. If anything
172 // isn't right, set mChunkSize to zero which signifies that the reader is
173 // in an invalid state and Read calls will return false;
174 for (const auto& shm : mSmallAllocs) {
175 if (!RefCountedShm::IsValid(shm) ||
176 RefCountedShm::GetSize(shm) != mChunkSize ||
177 RefCountedShm::GetBytes(shm) == nullptr) {
178 mChunkSize = 0;
179 return;
180 }
181 }
182
183 for (const auto& shm : mLargeAllocs) {
184 if (!shm.IsReadable() || shm.get<uint8_t>() == nullptr) {
185 mChunkSize = 0;
186 return;
187 }
188 }
189 }
190
ReadLarge(const layers::OffsetRange & aRange,wr::Vec<uint8_t> & aInto)191 bool ShmSegmentsReader::ReadLarge(const layers::OffsetRange& aRange,
192 wr::Vec<uint8_t>& aInto) {
193 // source = zero is for small allocs.
194 MOZ_RELEASE_ASSERT(aRange.source() != 0);
195 if (aRange.source() > mLargeAllocs.Length()) {
196 return false;
197 }
198 size_t id = aRange.source() - 1;
199 const ipc::Shmem& shm = mLargeAllocs[id];
200 if (shm.Size<uint8_t>() < aRange.length()) {
201 return false;
202 }
203
204 uint8_t* srcPtr = shm.get<uint8_t>();
205 aInto.PushBytes(Range<uint8_t>(srcPtr, aRange.length()));
206
207 return true;
208 }
209
Read(const layers::OffsetRange & aRange,wr::Vec<uint8_t> & aInto)210 bool ShmSegmentsReader::Read(const layers::OffsetRange& aRange,
211 wr::Vec<uint8_t>& aInto) {
212 if (aRange.length() == 0) {
213 return true;
214 }
215
216 if (aRange.source() != 0) {
217 return ReadLarge(aRange, aInto);
218 }
219
220 if (mChunkSize == 0) {
221 return false;
222 }
223
224 if (aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
225 return false;
226 }
227
228 size_t initialLength = aInto.Length();
229
230 size_t srcCursor = aRange.start();
231 size_t remainingBytesToCopy = aRange.length();
232 while (remainingBytesToCopy > 0) {
233 const size_t shm_idx = srcCursor / mChunkSize;
234 const size_t ptrOffset = srcCursor % mChunkSize;
235 const size_t copyRange =
236 std::min(remainingBytesToCopy, mChunkSize - ptrOffset);
237 uint8_t* srcPtr =
238 RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;
239
240 aInto.PushBytes(Range<uint8_t>(srcPtr, copyRange));
241
242 srcCursor += copyRange;
243 remainingBytesToCopy -= copyRange;
244 }
245
246 return aInto.Length() - initialLength == aRange.length();
247 }
248
GetReadPointerLarge(const layers::OffsetRange & aRange)249 Maybe<Range<uint8_t>> ShmSegmentsReader::GetReadPointerLarge(
250 const layers::OffsetRange& aRange) {
251 // source = zero is for small allocs.
252 MOZ_RELEASE_ASSERT(aRange.source() != 0);
253 if (aRange.source() > mLargeAllocs.Length()) {
254 return Nothing();
255 }
256 size_t id = aRange.source() - 1;
257 const ipc::Shmem& shm = mLargeAllocs[id];
258 if (shm.Size<uint8_t>() < aRange.length()) {
259 return Nothing();
260 }
261
262 uint8_t* srcPtr = shm.get<uint8_t>();
263 return Some(Range<uint8_t>(srcPtr, aRange.length()));
264 }
265
GetReadPointer(const layers::OffsetRange & aRange)266 Maybe<Range<uint8_t>> ShmSegmentsReader::GetReadPointer(
267 const layers::OffsetRange& aRange) {
268 if (aRange.length() == 0) {
269 return Some(Range<uint8_t>());
270 }
271
272 if (aRange.source() != 0) {
273 return GetReadPointerLarge(aRange);
274 }
275
276 if (mChunkSize == 0 ||
277 aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
278 return Nothing();
279 }
280
281 size_t srcCursor = aRange.start();
282 size_t remainingBytesToCopy = aRange.length();
283 const size_t shm_idx = srcCursor / mChunkSize;
284 const size_t ptrOffset = srcCursor % mChunkSize;
285 // Return nothing if we can't return a pointer to the full range
286 if (mChunkSize - ptrOffset < remainingBytesToCopy) {
287 return Nothing();
288 }
289 uint8_t* srcPtr = RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;
290 return Some(Range<uint8_t>(srcPtr, remainingBytesToCopy));
291 }
292
// Queue of resource-update ops whose payloads are staged in shmem through an
// embedded ShmSegmentsWriter.
IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    layers::WebRenderBridgeChild* aAllocator, size_t aChunkSize)
    : mWriter(aAllocator, aChunkSize) {}
296
// Move constructor: takes over aOther's writer (and its shmem) and pending
// update ops.
IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    IpcResourceUpdateQueue&& aOther) noexcept
    : mWriter(std::move(aOther.mWriter)),
      mUpdates(std::move(aOther.mUpdates)) {}
301
operator =(IpcResourceUpdateQueue && aOther)302 IpcResourceUpdateQueue& IpcResourceUpdateQueue::operator=(
303 IpcResourceUpdateQueue&& aOther) noexcept {
304 MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
305 mWriter = std::move(aOther.mWriter);
306 mUpdates = std::move(aOther.mUpdates);
307 return *this;
308 }
309
// Takes over aOther's writer and pending ops, like move-assignment. The
// current queue must be empty or its pending updates would be lost.
void IpcResourceUpdateQueue::ReplaceResources(IpcResourceUpdateQueue&& aOther) {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  mWriter = std::move(aOther.mWriter);
  mUpdates = std::move(aOther.mUpdates);
}
315
AddImage(ImageKey key,const ImageDescriptor & aDescriptor,Range<uint8_t> aBytes)316 bool IpcResourceUpdateQueue::AddImage(ImageKey key,
317 const ImageDescriptor& aDescriptor,
318 Range<uint8_t> aBytes) {
319 auto bytes = mWriter.Write(aBytes);
320 if (!bytes.length()) {
321 return false;
322 }
323 mUpdates.AppendElement(layers::OpAddImage(aDescriptor, bytes, 0, key));
324 return true;
325 }
326
AddBlobImage(BlobImageKey key,const ImageDescriptor & aDescriptor,Range<uint8_t> aBytes,ImageIntRect aVisibleRect)327 bool IpcResourceUpdateQueue::AddBlobImage(BlobImageKey key,
328 const ImageDescriptor& aDescriptor,
329 Range<uint8_t> aBytes,
330 ImageIntRect aVisibleRect) {
331 MOZ_RELEASE_ASSERT(aDescriptor.width > 0 && aDescriptor.height > 0);
332 auto bytes = mWriter.Write(aBytes);
333 if (!bytes.length()) {
334 return false;
335 }
336 mUpdates.AppendElement(
337 layers::OpAddBlobImage(aDescriptor, bytes, aVisibleRect, 0, key));
338 return true;
339 }
340
// Queues an OpAddPrivateExternalImage; no payload bytes are copied here, the
// image is referenced by its external id.
void IpcResourceUpdateQueue::AddPrivateExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey, wr::ImageDescriptor aDesc) {
  mUpdates.AppendElement(
      layers::OpAddPrivateExternalImage(aExtId, aKey, aDesc));
}
346
// Queues an OpAddSharedExternalImage referencing the image by external id.
void IpcResourceUpdateQueue::AddSharedExternalImage(wr::ExternalImageId aExtId,
                                                    wr::ImageKey aKey) {
  mUpdates.AppendElement(layers::OpAddSharedExternalImage(aExtId, aKey));
}
351
// Queues an OpPushExternalImageForTexture for a texture that already has an
// IPDL actor. The texture's actor must share the IPC channel with our
// WebRenderBridgeChild, otherwise the op could not be routed.
void IpcResourceUpdateQueue::PushExternalImageForTexture(
    wr::ExternalImageId aExtId, wr::ImageKey aKey,
    layers::TextureClient* aTexture, bool aIsUpdate) {
  MOZ_ASSERT(aTexture);
  MOZ_ASSERT(aTexture->GetIPDLActor());
  MOZ_RELEASE_ASSERT(aTexture->GetIPDLActor()->GetIPCChannel() ==
                     mWriter.WrBridge()->GetIPCChannel());
  mUpdates.AppendElement(layers::OpPushExternalImageForTexture(
      aExtId, aKey, nullptr, aTexture->GetIPDLActor(), aIsUpdate));
}
362
UpdateImageBuffer(ImageKey aKey,const ImageDescriptor & aDescriptor,Range<uint8_t> aBytes)363 bool IpcResourceUpdateQueue::UpdateImageBuffer(
364 ImageKey aKey, const ImageDescriptor& aDescriptor, Range<uint8_t> aBytes) {
365 auto bytes = mWriter.Write(aBytes);
366 if (!bytes.length()) {
367 return false;
368 }
369 mUpdates.AppendElement(layers::OpUpdateImage(aDescriptor, bytes, aKey));
370 return true;
371 }
372
UpdateBlobImage(BlobImageKey aKey,const ImageDescriptor & aDescriptor,Range<uint8_t> aBytes,ImageIntRect aVisibleRect,ImageIntRect aDirtyRect)373 bool IpcResourceUpdateQueue::UpdateBlobImage(BlobImageKey aKey,
374 const ImageDescriptor& aDescriptor,
375 Range<uint8_t> aBytes,
376 ImageIntRect aVisibleRect,
377 ImageIntRect aDirtyRect) {
378 MOZ_ASSERT(aVisibleRect.width > 0 && aVisibleRect.height > 0);
379
380 auto bytes = mWriter.Write(aBytes);
381 if (!bytes.length()) {
382 return false;
383 }
384 mUpdates.AppendElement(layers::OpUpdateBlobImage(aDescriptor, bytes, aKey,
385 aVisibleRect, aDirtyRect));
386 return true;
387 }
388
// Queues an OpUpdatePrivateExternalImage for the given external image and
// dirty rect; no payload bytes are copied here.
void IpcResourceUpdateQueue::UpdatePrivateExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey,
    const wr::ImageDescriptor& aDesc, ImageIntRect aDirtyRect) {
  mUpdates.AppendElement(
      layers::OpUpdatePrivateExternalImage(aExtId, aKey, aDesc, aDirtyRect));
}
395
// Queues an OpUpdateSharedExternalImage for the given external image and
// dirty rect.
void IpcResourceUpdateQueue::UpdateSharedExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey, ImageIntRect aDirtyRect) {
  mUpdates.AppendElement(
      layers::OpUpdateSharedExternalImage(aExtId, aKey, aDirtyRect));
}
401
// Queues an OpSetBlobImageVisibleArea updating the blob image's visible area.
void IpcResourceUpdateQueue::SetBlobImageVisibleArea(
    wr::BlobImageKey aKey, const ImageIntRect& aArea) {
  mUpdates.AppendElement(layers::OpSetBlobImageVisibleArea(aArea, aKey));
}
406
// Queues an OpDeleteImage for the given key.
void IpcResourceUpdateQueue::DeleteImage(ImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteImage(aKey));
}
410
// Queues an OpDeleteBlobImage for the given key.
void IpcResourceUpdateQueue::DeleteBlobImage(BlobImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteBlobImage(aKey));
}
414
AddRawFont(wr::FontKey aKey,Range<uint8_t> aBytes,uint32_t aIndex)415 bool IpcResourceUpdateQueue::AddRawFont(wr::FontKey aKey, Range<uint8_t> aBytes,
416 uint32_t aIndex) {
417 auto bytes = mWriter.Write(aBytes);
418 if (!bytes.length()) {
419 return false;
420 }
421 mUpdates.AppendElement(layers::OpAddRawFont(bytes, aIndex, aKey));
422 return true;
423 }
424
AddFontDescriptor(wr::FontKey aKey,Range<uint8_t> aBytes,uint32_t aIndex)425 bool IpcResourceUpdateQueue::AddFontDescriptor(wr::FontKey aKey,
426 Range<uint8_t> aBytes,
427 uint32_t aIndex) {
428 auto bytes = mWriter.Write(aBytes);
429 if (!bytes.length()) {
430 return false;
431 }
432 mUpdates.AppendElement(layers::OpAddFontDescriptor(bytes, aIndex, aKey));
433 return true;
434 }
435
// Queues an OpDeleteFont for the given key.
void IpcResourceUpdateQueue::DeleteFont(wr::FontKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFont(aKey));
}
439
// Queues an OpAddFontInstance. The variation list is serialized into shmem
// via WriteAsBytes; note the op is appended even if that write yields an
// empty range (unlike the Add*/Update* methods that return bool).
void IpcResourceUpdateQueue::AddFontInstance(
    wr::FontInstanceKey aKey, wr::FontKey aFontKey, float aGlyphSize,
    const wr::FontInstanceOptions* aOptions,
    const wr::FontInstancePlatformOptions* aPlatformOptions,
    Range<const gfx::FontVariation> aVariations) {
  auto bytes = mWriter.WriteAsBytes(aVariations);
  mUpdates.AppendElement(layers::OpAddFontInstance(
      aOptions ? Some(*aOptions) : Nothing(),
      aPlatformOptions ? Some(*aPlatformOptions) : Nothing(), bytes, aKey,
      aFontKey, aGlyphSize));
}
451
// Queues an OpDeleteFontInstance for the given key.
void IpcResourceUpdateQueue::DeleteFontInstance(wr::FontInstanceKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFontInstance(aKey));
}
455
// Hands the accumulated ops and their backing shmem segments to the caller
// (typically for transmission over IPC) and resets the queue for reuse.
void IpcResourceUpdateQueue::Flush(
    nsTArray<layers::OpUpdateResource>& aUpdates,
    nsTArray<layers::RefCountedShmem>& aSmallAllocs,
    nsTArray<ipc::Shmem>& aLargeAllocs) {
  aUpdates = std::move(mUpdates);
  mWriter.Flush(aSmallAllocs, aLargeAllocs);
}
463
IsEmpty() const464 bool IpcResourceUpdateQueue::IsEmpty() const {
465 if (mUpdates.Length() == 0) {
466 MOZ_ASSERT(mWriter.IsEmpty());
467 return true;
468 }
469 return false;
470 }
471
// Drops all pending ops and releases the writer's shmem segments.
void IpcResourceUpdateQueue::Clear() {
  mWriter.Clear();
  mUpdates.Clear();
}
476
477 // static
// static
// Drops one reference on each valid refcounted shmem and deallocates those
// whose count reaches zero, then empties the array.
void IpcResourceUpdateQueue::ReleaseShmems(
    ipc::IProtocol* aShmAllocator, nsTArray<layers::RefCountedShmem>& aShms) {
  for (auto& shm : aShms) {
    if (RefCountedShm::IsValid(shm) && RefCountedShm::Release(shm) == 0) {
      RefCountedShm::Dealloc(aShmAllocator, shm);
    }
  }
  aShms.Clear();
}
487
488 // static
// static
// Deallocates every (non-refcounted) large shmem segment through the
// protocol that allocated it, then empties the array.
void IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator,
                                           nsTArray<ipc::Shmem>& aShms) {
  for (auto& shm : aShms) {
    aShmAllocator->DeallocShmem(shm);
  }
  aShms.Clear();
}
496
497 } // namespace wr
498 } // namespace mozilla
499