/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* A type/length-parametrized vector class. */

#ifndef mozilla_Vector_h
#define mozilla_Vector_h

#include <algorithm>  // for std::remove_if (used by eraseIf)
#include <new>        // for placement new
#include <utility>

#include "mozilla/Alignment.h"
#include "mozilla/AllocPolicy.h"
#include "mozilla/ArrayUtils.h"  // for PointerRangeSize
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/Span.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"

namespace mozilla {

template <typename T, size_t N, class AllocPolicy>
class Vector;

namespace detail {

/*
 * Check that the given capacity wastes the minimal amount of space if
 * allocated on the heap. This means that aCapacity*sizeof(T) is as close to a
 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
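 *
 * For example, with sizeof(T) == 12: a capacity of 21 gives a size of 252,
 * and RoundUpPow2(252) == 256, so only 4 bytes (less than one T) are wasted;
 * a capacity of 16 gives a size of 192 and wastes 64 bytes -- excess space.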
 */
template <typename T>
static bool CapacityHasExcessSpace(size_t aCapacity) {
  size_t size = aCapacity * sizeof(T);
  return RoundUpPow2(size) - size >= sizeof(T);
}

/*
 * This template class provides a default implementation for vector operations
 * when the element type is not known to be a POD, as judged by IsPod.
 */
template <typename T, size_t N, class AP, bool IsPod>
struct VectorImpl {
  /*
   * Constructs an object in the uninitialized memory at *aDst with aArgs.
   */
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
  }

  /* Destroys constructed objects in the range [aBegin, aEnd). */
  static inline void destroy(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      p->~T();
    }
  }

  /* Constructs objects in the uninitialized range [aBegin, aEnd). */
  static inline void initialize(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  /*
   * Move-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, std::move(*p));
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
   * the same object aU.
   */
  template <typename U>
  static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aU);
    }
  }

  /*
   * Grows the given buffer to have capacity aNewCap, preserving the objects
   * constructed in the range [begin, end) and updating aV. Assumes that (1)
   * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
   * not overflow.
   */
  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf = aV.template pod_malloc<T>(aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    T* dst = newbuf;
    T* src = aV.beginNoCheck();
    for (; src < aV.endNoCheck(); ++dst, ++src) {
      new_(dst, std::move(*src));
    }
    VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
    aV.free_(aV.mBegin, aV.mTail.mCapacity);
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};

/*
 * This partial template specialization provides a default implementation for
 * vector operations when the element type is known to be a POD, as judged by
 * IsPod.
 */
template <typename T, size_t N, class AP>
struct VectorImpl<T, N, AP, true> {
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    // Explicitly construct a local object instead of using a temporary since
    // T(args...) will be treated like a C-style cast in the unary case and
    // allow unsafe conversions. Both forms should be equivalent to an
    // optimizing compiler.
    T temp(std::forward<Args>(aArgs)...);
    *aDst = temp;
  }

  static inline void destroy(T*, T*) {}

  static inline void initialize(T* aBegin, T* aEnd) {
    /*
     * You would think that memset would be a big win (or even break even)
     * when we know T is a POD. But currently it's not. This is probably
     * because |append| tends to be given small ranges and memset requires
     * a function call that doesn't get inlined.
     *
     * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
     */
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    /*
     * See above memset comment. Also, notice that copyConstruct is
     * currently templated (T != U), so memcpy won't work without
     * requiring T == U.
     *
     * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
     */
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  template <typename U>
  static inline void moveConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    copyConstruct(aDst, aSrcStart, aSrcEnd);
  }

  static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aT);
    }
  }

  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf =
        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};

// A struct for TestVector.cpp to access private internal fields.
// DO NOT DEFINE IN YOUR OWN CODE.
struct VectorTesting;

}  // namespace detail

/*
 * STL-like container providing a short-lived, dynamic buffer.  Vector calls the
 * constructors/destructors of all elements stored in its internal buffer, so
 * non-PODs may be safely used.  Additionally, Vector will store the first N
 * elements in-place before resorting to dynamic allocation.
 *
 * T requirements:
 *  - default and copy constructible, assignable, destructible
 *  - operations do not throw
 * MinInlineCapacity requirements:
 *  - any value; however, MinInlineCapacity is clamped to min/max values
 * AllocPolicy:
 *  - see "Allocation policies" in AllocPolicy.h (defaults to
 *    mozilla::MallocAllocPolicy)
 *
 * Vector is not reentrant: T member functions called during Vector member
 * functions must not call back into the same object!
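 *
 * Example usage (a sketch; |use| is a hypothetical consumer function):
 *
 *   mozilla::Vector<int, 8> vec;  // the first 8 elements are stored in-line
 *   if (!vec.append(42)) {
 *     return false;               // append is fallible and must be checked
 *   }
 *   for (int x : vec) {           // begin()/end() support range-based for
 *     use(x);
 *   }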
 */
template <typename T, size_t MinInlineCapacity = 0,
          class AllocPolicy = MallocAllocPolicy>
class MOZ_NON_PARAM Vector final : private AllocPolicy {
  /* utilities */

  static constexpr bool kElemIsPod = IsPod<T>::value;
  typedef detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>
      Impl;
  friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
                                   kElemIsPod>;

  friend struct detail::VectorTesting;

  [[nodiscard]] bool growStorageBy(size_t aIncr);
  [[nodiscard]] bool convertToHeapStorage(size_t aNewCap);
  [[nodiscard]] bool maybeCheckSimulatedOOM(size_t aRequestedSize);

  /* magic constants */

  /**
   * The maximum space allocated for inline element storage.
   *
   * We reduce space by what the AllocPolicy base class and prior Vector member
   * fields likely consume to attempt to play well with binary size classes.
   */
  static constexpr size_t kMaxInlineBytes =
      1024 -
      (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));

  /**
   * The number of T elements of inline capacity built into this Vector.  This
   * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
   *
   * We use a partially-specialized template (not explicit specialization, which
   * is only allowed at namespace scope) to compute this value.  The benefit is
   * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
   * defined at the time |Vector<T>| appears, if no inline storage is requested.
   */
  template <size_t MinimumInlineCapacity, size_t Dummy>
  struct ComputeCapacity {
    static constexpr size_t value =
        tl::Min<MinimumInlineCapacity, kMaxInlineBytes / sizeof(T)>::value;
  };

  template <size_t Dummy>
  struct ComputeCapacity<0, Dummy> {
    static constexpr size_t value = 0;
  };

  /** The actual inline capacity in number of elements T.  This may be zero! */
  static constexpr size_t kInlineCapacity =
      ComputeCapacity<MinInlineCapacity, 0>::value;

  /* member data */

  /*
   * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
   * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
   * mLength, mBegin + mCapacity) holds uninitialized memory. The range
   * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
   * previously allocated by a call to reserve().
   */
  T* mBegin;

  /* Number of elements in the vector. */
  size_t mLength;

  /*
   * Memory used to store capacity, reserved element count (debug builds only),
   * and inline storage.  The simple "answer" is:
   *
   *   size_t mCapacity;
   *   #ifdef DEBUG
   *   size_t mReserved;
   *   #endif
   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
   *
   * but there are complications.  First, C++ forbids zero-sized arrays that
   * might result.  Second, we don't want zero capacity to affect Vector's size
   * (even empty classes take up a byte, unless they're base classes).
   *
   * Yet again, we eliminate the zero-sized array using partial specialization.
   * And we eliminate potential size hit by putting capacity/reserved in one
   * struct, then putting the array (if any) in a derived struct.  If no array
   * is needed, the derived struct won't consume extra space.
   */
  struct CapacityAndReserved {
    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
        : mCapacity(aCapacity)
#ifdef DEBUG
          ,
          mReserved(aReserved)
#endif
    {
    }
    CapacityAndReserved() = default;

    /* Max number of elements storable in the vector without resizing. */
    size_t mCapacity;

#ifdef DEBUG
    /* Max elements of reserved or used space in this vector. */
    size_t mReserved;
#endif
  };
// Silence warnings about this struct possibly being padded due to the
// alignas() in it -- there's nothing we can do to avoid it.
#ifdef _MSC_VER
#  pragma warning(push)
#  pragma warning(disable : 4324)
#endif  // _MSC_VER

  template <size_t Capacity, size_t Dummy>
  struct CRAndStorage : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    alignas(T) unsigned char mBytes[Capacity * sizeof(T)];

    // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
    // T*.  Indirecting through this function addresses the problem.
    void* data() { return mBytes; }

    T* storage() { return static_cast<T*>(data()); }
  };

  template <size_t Dummy>
  struct CRAndStorage<0, Dummy> : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    T* storage() {
      // If this returns |nullptr|, functions like |Vector::begin()| would too,
      // breaking callers that pass a vector's elements as pointer/length to
      // code that bounds its operation by length but (even just as a sanity
      // check) always wants a non-null pointer.  Fake up an aligned, non-null
      // pointer to support these callers.
      return reinterpret_cast<T*>(sizeof(T));
    }
  };

  CRAndStorage<kInlineCapacity, 0> mTail;

#ifdef _MSC_VER
#  pragma warning(pop)
#endif  // _MSC_VER

#ifdef DEBUG
  friend class ReentrancyGuard;
  bool mEntered;
#endif

  /* private accessors */

  bool usingInlineStorage() const {
    return mBegin == const_cast<Vector*>(this)->inlineStorage();
  }

  T* inlineStorage() { return mTail.storage(); }

  T* beginNoCheck() const { return mBegin; }

  T* endNoCheck() { return mBegin + mLength; }

  const T* endNoCheck() const { return mBegin + mLength; }

#ifdef DEBUG
  /**
   * The amount of explicitly allocated space in this vector that is immediately
   * available to be filled by appending additional elements.  This value is
   * always greater than or equal to |length()| -- the vector's actual elements
   * are implicitly reserved.  This value is always less than or equal to
   * |capacity()|.  It may be explicitly increased using the |reserve()| method.
   */
  size_t reserved() const {
    MOZ_ASSERT(mLength <= mTail.mReserved);
    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
    return mTail.mReserved;
  }
#endif

  bool internalEnsureCapacity(size_t aNeeded);

  /* Append operations guaranteed to succeed due to pre-reserved space. */
  template <typename U>
  void internalAppend(U&& aU);
  template <typename U, size_t O, class BP>
  void internalAppendAll(const Vector<U, O, BP>& aU);
  void internalAppendN(const T& aT, size_t aN);
  template <typename U>
  void internalAppend(const U* aBegin, size_t aLength);
  template <typename U>
  void internalMoveAppend(U* aBegin, size_t aLength);

 public:
  static const size_t sMaxInlineStorage = MinInlineCapacity;

  typedef T ElementType;

  explicit Vector(AllocPolicy = AllocPolicy());
  Vector(Vector&&);            /* Move constructor. */
  Vector& operator=(Vector&&); /* Move assignment. */
  ~Vector();

  /* accessors */

  const AllocPolicy& allocPolicy() const { return *this; }

  AllocPolicy& allocPolicy() { return *this; }

  enum { InlineLength = MinInlineCapacity };

  size_t length() const { return mLength; }

  bool empty() const { return mLength == 0; }

  size_t capacity() const { return mTail.mCapacity; }

  T* begin() {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  const T* begin() const {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  T* end() {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  const T* end() const {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  T& operator[](size_t aIndex) {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  const T& operator[](size_t aIndex) const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  T& back() {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  const T& back() const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  operator mozilla::Span<const T>() const {
    // Explicitly specify template argument here to avoid instantiating Span<T>
    // first and then implicitly converting to Span<const T>
    return mozilla::Span<const T>{mBegin, mLength};
  }

  operator mozilla::Span<T>() { return mozilla::Span{mBegin, mLength}; }

  class Range {
    friend class Vector;
    T* mCur;
    T* mEnd;
    Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  class ConstRange {
    friend class Vector;
    const T* mCur;
    const T* mEnd;
    ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    const T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  Range all() { return Range(begin(), end()); }
  ConstRange all() const { return ConstRange(begin(), end()); }
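
  /*
   * Example of Range-based iteration (a sketch; |use| is a hypothetical
   * consumer function):
   *
   *   for (auto r = vec.all(); !r.empty(); r.popFront()) {
   *     use(r.front());
   *   }
   */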

  /* mutators */

  /**
   * Reverse the order of the elements in the vector in place.
   */
  void reverse();

  /**
   * Given that the vector is empty, grow the internal capacity to |aRequest|,
   * keeping the length 0.
   */
  [[nodiscard]] bool initCapacity(size_t aRequest);

  /**
   * Given that the vector is empty, grow the internal capacity and length to
   * |aRequest| leaving the elements' memory completely uninitialized (with all
   * the associated hazards and caveats). This avoids the usual allocation-size
   * rounding that happens in resize and overhead of initialization for elements
   * that are about to be overwritten.
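   *
   * Example (a sketch; |src| and |n| are hypothetical): filling a byte
   * buffer from an external source, overwriting every element before use:
   *
   *   Vector<uint8_t> buf;
   *   if (!buf.initLengthUninitialized(n)) {
   *     return false;
   *   }
   *   memcpy(buf.begin(), src, n);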
   */
  [[nodiscard]] bool initLengthUninitialized(size_t aRequest);

  /**
   * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
   * |aRequest - length()| elements, in any sequence of append/appendAll calls,
   * is guaranteed to succeed.
   *
   * A request to reserve an amount less than the current length does not affect
   * reserved space.
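   *
   * Example (a sketch): reserving up front makes later appends infallible:
   *
   *   if (!vec.reserve(vec.length() + count)) {
   *     return false;                  // handle OOM once, here
   *   }
   *   for (size_t i = 0; i < count; i++) {
   *     vec.infallibleAppend(T());     // guaranteed to succeed
   *   }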
   */
  [[nodiscard]] bool reserve(size_t aRequest);

  /**
   * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkBy(size_t aIncr);

  /**
   * Destroy elements in the range [aNewLength, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkTo(size_t aNewLength);

  /** Grow the vector by aIncr elements. */
  [[nodiscard]] bool growBy(size_t aIncr);

  /** Call shrinkBy or growBy based on whether newSize > length(). */
  [[nodiscard]] bool resize(size_t aNewLength);

  /**
   * Increase the length of the vector, but don't initialize the new elements
   * -- leave them as uninitialized memory.
   */
  [[nodiscard]] bool growByUninitialized(size_t aIncr);
  void infallibleGrowByUninitialized(size_t aIncr);
  [[nodiscard]] bool resizeUninitialized(size_t aNewLength);

  /** Shorthand for shrinkBy(length()). */
  void clear();

  /** Clears and releases any heap-allocated storage. */
  void clearAndFree();

  /**
   * Shrinks the storage to drop excess capacity if possible.
   *
   * The return value indicates whether the operation succeeded; a false
   * return indicates an OOM. The bool can be safely ignored unless you want
   * to provide the guarantee that `length() == capacity()`.
   *
   * For PODs, it calls the AllocPolicy's pod_realloc. For non-PODs, it moves
   * the elements into the new storage.
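   *
   * Example (a sketch; ignoring the result when excess capacity is merely a
   * nice-to-have):
   *
   *   (void)vec.shrinkStorageToFit();  // on OOM the vector remains valid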
   */
  bool shrinkStorageToFit();

  /**
   * If true, appending |aNeeded| elements won't reallocate elements storage.
   * This *doesn't* mean that infallibleAppend may be used!  You still must
   * reserve the extra space, even if this method indicates that appends won't
   * need to reallocate elements storage.
   */
  bool canAppendWithoutRealloc(size_t aNeeded) const;

  /** Potentially fallible append operations. */

  /**
   * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
   * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
   * not amused.")
   */
  template <typename U>
  [[nodiscard]] bool append(U&& aU);

  /**
   * Construct a T in-place as a new entry at the end of this vector.
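   *
   * Example (a sketch; assumes T is constructible from (int, int)):
   *
   *   if (!vec.emplaceBack(1, 2)) {
   *     return false;  // on OOM, no element was constructed
   *   }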
   */
  template <typename... Args>
  [[nodiscard]] bool emplaceBack(Args&&... aArgs) {
    if (!growByUninitialized(1)) return false;
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    return true;
  }

  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(const Vector<U, O, BP>& aU);
  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(Vector<U, O, BP>&& aU);
  [[nodiscard]] bool appendN(const T& aT, size_t aN);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, const U* aEnd);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, size_t aLength);
  template <typename U>
  [[nodiscard]] bool moveAppend(U* aBegin, U* aEnd);

  /*
   * Guaranteed-infallible append operations for use upon vectors whose
   * memory has been pre-reserved.  Don't use this if you haven't reserved the
   * memory!
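   *
   * Example (a sketch; |a| and |b| are hypothetical values of type T):
   *
   *   if (vec.reserve(vec.length() + 2)) {
   *     vec.infallibleAppend(a);  // cannot fail: space was reserved above
   *     vec.infallibleAppend(b);
   *   }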
   */
  template <typename U>
  void infallibleAppend(U&& aU) {
    internalAppend(std::forward<U>(aU));
  }
  void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
  template <typename U>
  void infallibleAppend(const U* aBegin, const U* aEnd) {
    internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
  }
  template <typename U>
  void infallibleAppend(const U* aBegin, size_t aLength) {
    internalAppend(aBegin, aLength);
  }
  template <typename... Args>
  void infallibleEmplaceBack(Args&&... aArgs) {
    infallibleGrowByUninitialized(1);
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
  }

  void popBack();

  T popCopy();

  /**
   * If elements are stored in-place, return nullptr and leave this vector
   * unmodified.
   *
   * Otherwise return this vector's elements buffer, and clear this vector as if
   * by clearAndFree(). The caller now owns the buffer and is responsible for
   * deallocating it consistent with this vector's AllocPolicy.
   *
   * N.B. Although a T*, only the range [0, length()) is constructed.
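   *
   * Example (a sketch; assumes the default MallocAllocPolicy, so the buffer
   * may be released with free() after destroying the elements):
   *
   *   size_t len = vec.length();
   *   if (T* buf = vec.extractRawBuffer()) {
   *     // ... use buf[0], ..., buf[len - 1] ...
   *     for (size_t i = 0; i < len; i++) {
   *       buf[i].~T();
   *     }
   *     free(buf);
   *   }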
   */
  [[nodiscard]] T* extractRawBuffer();

  /**
   * If elements are stored in-place, allocate a new buffer, move this vector's
   * elements into it, and return that buffer.
   *
   * Otherwise return this vector's elements buffer. The caller now owns the
   * buffer and is responsible for deallocating it consistent with this vector's
   * AllocPolicy.
   *
   * This vector is cleared, as if by clearAndFree(), when this method
   * succeeds. This method fails and returns nullptr only if new elements buffer
   * allocation fails.
   *
   * N.B. Only the range [0, length()) of the returned buffer is constructed.
   * If any of these elements are uninitialized (as growByUninitialized
   * enables), behavior is undefined.
   */
  [[nodiscard]] T* extractOrCopyRawBuffer();

  /**
   * Transfer ownership of an array of objects into the vector.  The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   *      passed range [aP, aP + aLength). The range [aP + aLength, aP +
   *      aCapacity) must be allocated uninitialized memory.
   */
  void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);

  /**
   * Transfer ownership of an array of objects into the vector.  The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   *      passed array.
   */
  void replaceRawBuffer(T* aP, size_t aLength);

  /**
   * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
   * one position higher.  On success, |aP| should not be reused because it'll
   * be a dangling pointer if reallocation of the vector storage occurred; the
   * return value should be used instead.  On failure, nullptr is returned.
   *
   * Example usage:
   *
   *   if (!(p = vec.insert(p, val))) {
   *     <handle failure>
   *   }
   *   <keep working with p>
   *
   * This is inherently a linear-time operation.  Be careful!
   */
  template <typename U>
  [[nodiscard]] T* insert(T* aP, U&& aVal);

  /**
   * Removes the element |aT|, which must fall in the bounds [begin, end),
   * shifting existing elements from |aT + 1| onward one position lower.
   */
  void erase(T* aT);

  /**
   * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
   * [begin, end), shifting existing elements from |aEnd| onward to aBegin's old
   * position.
   */
  void erase(T* aBegin, T* aEnd);

  /**
   * Removes all elements that satisfy the predicate, shifting existing elements
   * lower to fill erased gaps.
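   *
   * Example (a sketch): removing all negative values from a Vector<int>:
   *
   *   vec.eraseIf([](int aValue) { return aValue < 0; });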
   */
  template <typename Pred>
  void eraseIf(Pred aPred);

  /**
   * Removes all elements that compare equal to |aU|, shifting existing elements
   * lower to fill erased gaps.
   */
  template <typename U>
  void eraseIfEqual(const U& aU);

  /**
   * Measure the size of the vector's heap-allocated storage.
   */
  size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;

  /**
   * Like sizeOfExcludingThis, but also measures the size of the vector
   * object (which must be heap-allocated) itself.
   */
  size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;

  void swap(Vector& aOther);

 private:
  Vector(const Vector&) = delete;
  void operator=(const Vector&) = delete;
};

/* This does the re-entrancy check plus several other sanity checks. */
#define MOZ_REENTRANCY_GUARD_ET_AL                                         \
  ReentrancyGuard g(*this);                                                \
  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
  MOZ_ASSERT(reserved() <= mTail.mCapacity);                               \
  MOZ_ASSERT(mLength <= reserved());                                       \
  MOZ_ASSERT(mLength <= mTail.mCapacity)

/* Vector Implementation */

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
    : AP(std::move(aAP)),
      mLength(0),
      mTail(kInlineCapacity, 0)
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mBegin = inlineStorage();
}

/* Move constructor. */
template <typename T, size_t N, class AllocPolicy>
MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
    : AllocPolicy(std::move(aRhs))
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mLength = aRhs.mLength;
  mTail.mCapacity = aRhs.mTail.mCapacity;
#ifdef DEBUG
  mTail.mReserved = aRhs.mTail.mReserved;
#endif

  if (aRhs.usingInlineStorage()) {
    /* We can't move the buffer over in this case, so copy elements. */
    mBegin = inlineStorage();
    Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
    /*
     * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
     * The elements in its in-line storage still need to be destroyed.
     */
  } else {
    /*
     * Take src's buffer, and turn src into an empty vector using
     * in-line storage.
     */
    mBegin = aRhs.mBegin;
    aRhs.mBegin = aRhs.inlineStorage();
    aRhs.mTail.mCapacity = kInlineCapacity;
    aRhs.mLength = 0;
#ifdef DEBUG
    aRhs.mTail.mReserved = 0;
#endif
  }
}

/* Move assignment. */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
  MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
  this->~Vector();
  new (KnownNotNull, this) Vector(std::move(aRhs));
  return *this;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  T* elems = mBegin;
  size_t len = mLength;
  size_t mid = len / 2;
  for (size_t i = 0; i < mid; i++) {
    std::swap(elems[i], elems[len - i - 1]);
  }
}

/*
 * This function will create a new heap buffer with capacity aNewCap,
 * move all elements in the inline buffer to this new buffer,
 * and fail on OOM.
 */
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
  MOZ_ASSERT(usingInlineStorage());

  /* Allocate buffer. */
  MOZ_ASSERT(!detail::CapacityHasExcessSpace<T>(aNewCap));
  T* newBuf = this->template pod_malloc<T>(aNewCap);
  if (MOZ_UNLIKELY(!newBuf)) {
    return false;
  }

  /* Copy inline elements into heap buffer. */
  Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());

  /* Switch in heap buffer. */
  mBegin = newBuf;
  /* mLength is unchanged. */
  mTail.mCapacity = aNewCap;
  return true;
}

template <typename T, size_t N, class AP>
MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);

  /*
   * When choosing a new capacity, its size should be as close to 2**N bytes
   * as possible.  2**N-sized requests are best because they are unlikely to
   * be rounded up by the allocator.  Asking for a 2**N number of elements
   * isn't as good, because if sizeof(T) is not a power-of-two that would
   * result in a non-2**N request size.
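   *
   * For example, with sizeof(T) == 12, asking for 16 elements is a 192-byte
   * request, which is not a power of two; asking for 21 elements (252 bytes)
   * stays within one T of the 256-byte power of two.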
   */

  size_t newCap;

  if (aIncr == 1) {
    if (usingInlineStorage()) {
      /* This case occurs in ~70--80% of the calls to this function. */
      size_t newSize =
          tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
      newCap = newSize / sizeof(T);
      goto convert;
    }

    if (mLength == 0) {
      /* This case occurs in ~0--10% of the calls to this function. */
      newCap = 1;
      goto grow;
    }

    /* This case occurs in ~15--20% of the calls to this function. */

    /*
     * Will mLength * 4 * sizeof(T) overflow?  This condition limits a vector
     * to 1GB of memory on a 32-bit system, which is a reasonable limit.  It
     * also ensures that
     *
     *   static_cast<char*>(end()) - static_cast<char*>(begin())
     *
     * doesn't overflow ptrdiff_t (see bug 510319).
     */
    if (MOZ_UNLIKELY(mLength & tl::MulOverflowMask<4 * sizeof(T)>::value)) {
      this->reportAllocOverflow();
      return false;
    }

    /*
     * If we reach here, the existing capacity will have a size that is already
     * as close to 2^N as sizeof(T) will allow.  Just double the capacity, and
     * then there might be space for one more element.
     */
    newCap = mLength * 2;
    if (detail::CapacityHasExcessSpace<T>(newCap)) {
      newCap += 1;
    }
  } else {
    /* This case occurs in ~2% of the calls to this function. */
    size_t newMinCap = mLength + aIncr;

    /* Did mLength + aIncr overflow?  Will newCap * sizeof(T) overflow? */
    if (MOZ_UNLIKELY(newMinCap < mLength ||
                     newMinCap & tl::MulOverflowMask<2 * sizeof(T)>::value)) {
      this->reportAllocOverflow();
      return false;
    }

    size_t newMinSize = newMinCap * sizeof(T);
    size_t newSize = RoundUpPow2(newMinSize);
    newCap = newSize / sizeof(T);
  }

  if (usingInlineStorage()) {
  convert:
    return convertToHeapStorage(newCap);
  }

grow:
  return Impl::growTo(*this, newCap);
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
  MOZ_ASSERT(empty());
  MOZ_ASSERT(usingInlineStorage());
  if (aRequest == 0) {
    return true;
  }
  T* newbuf = this->template pod_malloc<T>(aRequest);
  if (MOZ_UNLIKELY(!newbuf)) {
    return false;
  }
  mBegin = newbuf;
  mTail.mCapacity = aRequest;
#ifdef DEBUG
  mTail.mReserved = aRequest;
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
  if (!initCapacity(aRequest)) {
    return false;
  }
  infallibleGrowByUninitialized(aRequest);
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
  if (aRequestedSize <= N) {
    return true;
  }

#ifdef DEBUG
  if (aRequestedSize <= mTail.mReserved) {
    return true;
  }
#endif

  return allocPolicy().checkSimulatedOOM();
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aRequest > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(aRequest)) {
    return false;
  }
#ifdef DEBUG
  if (aRequest > mTail.mReserved) {
    mTail.mReserved = aRequest;
  }
  MOZ_ASSERT(mLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(aIncr <= mLength);
  Impl::destroy(endNoCheck() - aIncr, endNoCheck());
  mLength -= aIncr;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
  MOZ_ASSERT(aNewLength <= mLength);
  shrinkBy(mLength - aNewLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
  T* newend = endNoCheck() + aIncr;
  Impl::initialize(endNoCheck(), newend);
  mLength += aIncr;
#ifdef DEBUG
  if (mLength > mTail.mReserved) {
    mTail.mReserved = mLength;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aIncr > mTail.mReserved) {
    mTail.mReserved = mLength + aIncr;
  }
#endif
  infallibleGrowByUninitialized(aIncr);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
    size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr <= reserved());
  mLength += aIncr;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growBy(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
    size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growByUninitialized(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clear() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  mLength = 0;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clearAndFree() {
  clear();

  if (usingInlineStorage()) {
    return;
  }
  this->free_(beginNoCheck(), mTail.mCapacity);
  mBegin = inlineStorage();
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::shrinkStorageToFit() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  const auto length = this->length();
  if (usingInlineStorage() || length == capacity()) {
    return true;
  }

  if (!length) {
    this->free_(beginNoCheck(), mTail.mCapacity);
    mBegin = inlineStorage();
    mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
    mTail.mReserved = 0;
#endif
    return true;
  }

  T* newBuf;
  size_t newCap;
  if (length <= kInlineCapacity) {
    newBuf = inlineStorage();
    newCap = kInlineCapacity;
  } else {
    if (kElemIsPod) {
      newBuf = this->template pod_realloc<T>(beginNoCheck(), mTail.mCapacity,
                                             length);
    } else {
      newBuf = this->template pod_malloc<T>(length);
    }
    if (MOZ_UNLIKELY(!newBuf)) {
      return false;
    }
    newCap = length;
  }
  if (!kElemIsPod || newBuf == inlineStorage()) {
    Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
    Impl::destroy(beginNoCheck(), endNoCheck());
  }
  if (!kElemIsPod) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
  mBegin = newBuf;
  mTail.mCapacity = newCap;
#ifdef DEBUG
  mTail.mReserved = length;
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
  return mLength + aNeeded <= mTail.mCapacity;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
    const Vector<U, O, BP>& aOther) {
  internalAppend(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::new_(endNoCheck(), std::forward<U>(aU));
  ++mLength;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  internalAppendN(aT, aNeeded);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
                                                         size_t aNeeded) {
  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstructN(endNoCheck(), aNeeded, aT);
  mLength += aNeeded;
}

template <typename T, size_t N, class AP>
template <typename U>
inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
  MOZ_ASSERT(begin() <= aP);
  MOZ_ASSERT(aP <= end());
  size_t pos = aP - begin();
  MOZ_ASSERT(pos <= mLength);
  size_t oldLength = mLength;
  if (pos == oldLength) {
    if (!append(std::forward<U>(aVal))) {
      return nullptr;
    }
  } else {
    T oldBack = std::move(back());
    if (!append(std::move(oldBack))) {
      return nullptr;
    }
    for (size_t i = oldLength - 1; i > pos; --i) {
      (*this)[i] = std::move((*this)[i - 1]);
    }
    (*this)[pos] = std::forward<U>(aVal);
  }
  return begin() + pos;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aIt) {
  MOZ_ASSERT(begin() <= aIt);
  MOZ_ASSERT(aIt < end());
  while (aIt + 1 < end()) {
    *aIt = std::move(*(aIt + 1));
    ++aIt;
  }
  popBack();
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
  MOZ_ASSERT(begin() <= aBegin);
  MOZ_ASSERT(aBegin <= aEnd);
  MOZ_ASSERT(aEnd <= end());
  while (aEnd < end()) {
    *aBegin++ = std::move(*aEnd++);
  }
  shrinkBy(aEnd - aBegin);
}

template <typename T, size_t N, class AP>
template <typename Pred>
void Vector<T, N, AP>::eraseIf(Pred aPred) {
  // remove_if finds the first element to be erased, and then efficiently move-
  // assigns elements to effectively overwrite elements that satisfy the
  // predicate. It returns the new end pointer, after which there are only
  // moved-from elements ready to be destroyed, so we just need to shrink the
  // vector accordingly.
  T* newEnd = std::remove_if(begin(), end(),
                             [&aPred](const T& aT) { return aPred(aT); });
  MOZ_ASSERT(newEnd <= end());
  shrinkBy(end() - newEnd);
}

template <typename T, size_t N, class AP>
template <typename U>
void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
  return eraseIf([&aU](const T& aT) { return aT == aU; });
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::internalEnsureCapacity(
    size_t aNeeded) {
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                const U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalAppend(aInsBegin, needed);
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
                                                        size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::moveAppend(U* aInsBegin, U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalMoveAppend(aInsBegin, needed);
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalMoveAppend(U* aInsBegin,
                                                            size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::moveConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength == mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(1))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + 1 > mTail.mReserved) {
    mTail.mReserved = mLength + 1;
  }
#endif
  internalAppend(std::forward<U>(aU));
  return true;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
    const Vector<U, O, BP>& aOther) {
  return append(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(Vector<U, O, BP>&& aOther) {
  if (empty() && capacity() < aOther.length()) {
    *this = std::move(aOther);
    return true;
  }

  if (moveAppend(aOther.begin(), aOther.end())) {
    aOther.clearAndFree();
    return true;
  }

  return false;
}

template <typename T, size_t N, class AP>
template <class U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                size_t aInsLength) {
  return append(aInsBegin, aInsBegin + aInsLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(!empty());
  --mLength;
  endNoCheck()->~T();
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
  T ret = back();
  popBack();
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractRawBuffer() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  if (usingInlineStorage()) {
    return nullptr;
  }

  T* ret = mBegin;
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
  if (T* ret = extractRawBuffer()) {
    return ret;
  }

  MOZ_REENTRANCY_GUARD_ET_AL;

  T* copy = this->template pod_malloc<T>(mLength);
  if (!copy) {
    return nullptr;
  }

  Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return copy;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
                                               size_t aCapacity) {
  MOZ_REENTRANCY_GUARD_ET_AL;

  /* Destroy what we have. */
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }

  /* Take in the new buffer. */
  if (aCapacity <= kInlineCapacity) {
    /*
     * We convert to inline storage if possible, even though aP might
     * otherwise be acceptable.  Maybe this behaviour should be
     * specifiable with an argument to this function.
     */
    mBegin = inlineStorage();
    mLength = aLength;
    mTail.mCapacity = kInlineCapacity;
    Impl::moveConstruct(mBegin, aP, aP + aLength);
    Impl::destroy(aP, aP + aLength);
    this->free_(aP, aCapacity);
  } else {
    mBegin = aP;
    mLength = aLength;
    mTail.mCapacity = aCapacity;
  }
#ifdef DEBUG
  mTail.mReserved = aCapacity;
#endif
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
  replaceRawBuffer(aP, aLength, aLength);
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::swap(Vector& aOther) {
  static_assert(N == 0, "still need to implement this for N != 0");

  // This only works when inline storage is always empty.
  if (!usingInlineStorage() && aOther.usingInlineStorage()) {
    aOther.mBegin = mBegin;
    mBegin = inlineStorage();
  } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
    mBegin = aOther.mBegin;
    aOther.mBegin = aOther.inlineStorage();
  } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
    std::swap(mBegin, aOther.mBegin);
  } else {
    // This case is a no-op, since we'd set both to use their inline storage.
  }

  std::swap(mLength, aOther.mLength);
  std::swap(mTail.mCapacity, aOther.mTail.mCapacity);
#ifdef DEBUG
  std::swap(mTail.mReserved, aOther.mTail.mReserved);
#endif
}

}  // namespace mozilla

#endif /* mozilla_Vector_h */