1 /////////////////////////////////////////////////////////////////////////////
2 // Copyright (c) Electronic Arts Inc. All rights reserved.
3 /////////////////////////////////////////////////////////////////////////////
4 
5 ///////////////////////////////////////////////////////////////////////////////
6 // A ring buffer is a FIFO (first-in, first-out) container which acts
7 // much like a queue. The difference is that a ring buffer is implemented
// via chasing pointers around a given container instead of, as with queue,
// adding and removing elements: writes go to the end position and reads come from the begin position.
10 // The benefit of a ring buffer is that memory allocations don't occur
11 // and new elements are neither added nor removed from the container.
12 // Elements in the container are simply assigned values in circles around
13 // the container.
14 ///////////////////////////////////////////////////////////////////////////////
15 
16 
17 #ifndef EASTL_RING_BUFFER_H
18 #define EASTL_RING_BUFFER_H
19 
20 
21 #include <EASTL/internal/config.h>
22 #include <EASTL/iterator.h>
23 #include <EASTL/vector.h>
24 #include <EASTL/initializer_list.h>
25 #include <stddef.h>
26 
27 #if defined(EA_PRAGMA_ONCE_SUPPORTED)
28 	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
29 #endif
30 
31 
32 
33 namespace eastl
34 {
35 	/// EASTL_RING_BUFFER_DEFAULT_NAME
36 	///
37 	/// Defines a default container name in the absence of a user-provided name.
38 	///
39 	#ifndef EASTL_RING_BUFFER_DEFAULT_NAME
40 		#define EASTL_RING_BUFFER_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " ring_buffer" // Unless the user overrides something, this is "EASTL ring_buffer".
41 	#endif
42 
43 	/// EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
44 	///
45 	#ifndef EASTL_RING_BUFFER_DEFAULT_ALLOCATOR
46 		#define EASTL_RING_BUFFER_DEFAULT_ALLOCATOR allocator_type(EASTL_RING_BUFFER_DEFAULT_NAME)
47 	#endif
48 
49 
50 	/// ring_buffer_iterator
51 	///
52 	/// We force this iterator to act like a random access iterator even if
53 	/// the underlying container doesn't support random access iteration.
54 	/// Any BidirectionalIterator can be a RandomAccessIterator; it just
55 	/// might be inefficient in some cases.
56 	///
	template <typename T, typename Pointer, typename Reference, typename Container>
	struct ring_buffer_iterator
	{
	public:
		typedef ring_buffer_iterator<T, Pointer, Reference, Container>   this_type;
		typedef T                                                        value_type;
		typedef Pointer                                                  pointer;
		typedef Reference                                                reference;
		typedef typename Container::size_type                            size_type;
		typedef typename Container::difference_type                      difference_type;
		typedef typename Container::iterator                             container_iterator;
		typedef typename Container::const_iterator                       container_const_iterator;
		typedef ring_buffer_iterator<T, T*, T&, Container>               iterator;
		typedef ring_buffer_iterator<T, const T*, const T&, Container>   const_iterator;
		typedef EASTL_ITC_NS::random_access_iterator_tag                 iterator_category;

	public:
		Container*         mpContainer;         // The underlying container we wrap around. Not owned by the iterator.
		container_iterator mContainerIterator;  // Current position within *mpContainer.

	public:
		ring_buffer_iterator();
		ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator);
		ring_buffer_iterator(const iterator& x);  // Also enables iterator -> const_iterator conversion.

		ring_buffer_iterator& operator=(const iterator& x);

		reference operator*() const;
		pointer   operator->() const;

		// Increment and decrement wrap around the underlying container's end/begin.
		this_type& operator++();
		this_type  operator++(int);

		this_type& operator--();
		this_type  operator--(int);

		this_type& operator+=(difference_type n);
		this_type& operator-=(difference_type n);

		this_type operator+(difference_type n) const;
		this_type operator-(difference_type n) const;

	protected:
		// Tag-dispatched advancement: the input_iterator_tag overload steps one
		// element at a time; the random_access overload moves in O(1).
		void increment(difference_type n, EASTL_ITC_NS::input_iterator_tag);
		void increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag);

	}; // struct ring_buffer_iterator
104 
105 
106 
107 	/// ring_buffer
108 	///
109 	/// Implements a ring buffer via a given container type, which would
110 	/// typically be a vector or array, though any container which supports
111 	/// bidirectional iteration would work.
112 	///
113 	/// A ring buffer is a FIFO (first-in, first-out) container which acts
114 	/// much like a queue. The difference is that a ring buffer is implemented
115 	/// via chasing pointers around a container and moving the read and write
116 	/// positions forward (and possibly wrapping around) as the container is
117 	/// read and written via pop_front and push_back.
118 	///
119 	/// The benefit of a ring buffer is that memory allocations don't occur
120 	/// and new elements are neither added nor removed from the container.
121 	/// Elements in the container are simply assigned values in circles around
122 	/// the container.
123 	///
124 	/// ring_buffer is different from other containers -- including adapter
125 	/// containers -- in how iteration is done. Iteration of a ring buffer
126 	/// starts at the current begin position, proceeds to the end of the underlying
127 	/// container, and continues at the begin of the underlying container until
128 	/// the ring buffer's current end position. Thus a ring_buffer does
129 	/// indeed have a begin and an end, though the values of begin and end
130 	/// chase each other around the container. An empty ring_buffer is one
131 	/// in which end == begin, and a full ring_buffer is one in which
132 	/// end + 1 == begin.
133 	///
134 	/// Example of a ring buffer layout, where + indicates queued items:
135 	///    ++++++++++--------------------------------+++++++++
136 	///              ^                               ^
137 	///              end                             begin
138 	///
139 	/// Empty ring buffer:
140 	///    ---------------------------------------------------
141 	///                              ^
142 	///                          begin / end
143 	///
144 	/// Full ring buffer. Note that one item is necessarily unused; it is
	/// analogous to a '\0' at the end of a C string:
146 	///    +++++++++++++++++++++++++++++++++++++++++-+++++++++
147 	///                                             ^^
148 	///                                           end begin
149 	///
150 	/// A push_back operation on a ring buffer assigns the new value to end.
151 	/// If there is no more space in the buffer, this will result in begin
	/// being overwritten and the begin position being moved forward one position.
153 	/// The user can use the full() function to detect this condition.
154 	/// Note that elements in a ring buffer are not created or destroyed as
	/// they are added and removed; they are merely assigned. Only on
156 	/// container construction and destruction are any elements created and
157 	/// destroyed.
158 	///
159 	/// The ring buffer can be used in either direction. By this we mean that
160 	/// you can use push_back to add items and pop_front to remove them; or you can
161 	/// use push_front to add items and pop_back to remove them. You aren't
162 	/// limited to these operations; you can push or pop from either side
163 	/// arbitrarily and you can insert or erase anywhere in the container.
164 	///
165 	/// The ring buffer requires the user to specify a Container type, which
166 	/// by default is vector. However, any container with bidirectional iterators
167 	/// will work, such as list, deque, string or any of the fixed_* versions
168 	/// of these containers, such as fixed_string. Since ring buffer works via copying
169 	/// elements instead of allocating and freeing nodes, inserting in the middle
170 	/// of a ring buffer based on list (instead of vector) is no more efficient.
171 	///
172 	/// To use the ring buffer, its container must be resized to the desired
173 	/// ring buffer size. Changing the size of a ring buffer may cause ring
174 	/// buffer iterators to invalidate.
175 	///
176 	/// An alternative to using a ring buffer is to use a list with a user-created
177 	/// node pool and custom allocator. There are various tradeoffs that result from this.
178 	///
179 	/// Example usage:
180 	///     ring_buffer< int, list<int> > rb(100);
181 	///     rb.push_back(1);
182 	///
183 	/// Example usage:
184 	///     // Example of creating an on-screen debug log that shows 16
185 	///     // strings at a time and scrolls older strings away.
186 	///
187 	///     // Create ring buffer of 16 strings.
188 	///     ring_buffer< string, vector<string> > debugLogText(16);
189 	///
190 	///     // Reserve 128 chars for each line. This can make it so that no
191 	///     // runtime memory allocations occur.
192 	///     for(vector<string>::iterator it = debugLogText.get_container().begin(),
193 	///         itEnd = debugLogText.get_container().end(); it != itEnd; ++it)
194 	///     {
195 	///         (*it).reserve(128);
196 	///     }
197 	///
198 	///     // Add a new string, using push_front() and front() instead of
199 	///     // push_front(str) in order to avoid creating a temporary str.
200 	///     debugLogText.push_front();
201 	///     debugLogText.front() = "Player fired weapon";
202 	///
	template <typename T, typename Container = eastl::vector<T>, typename Allocator = typename Container::allocator_type>
	class ring_buffer
	{
	public:
		typedef ring_buffer<T, Container, Allocator>                   this_type;
		typedef Container                                              container_type;
		typedef Allocator                                              allocator_type;

		typedef typename Container::value_type                         value_type;
		typedef typename Container::reference                          reference;
		typedef typename Container::const_reference                    const_reference;
		typedef typename Container::size_type                          size_type;
		typedef typename Container::difference_type                    difference_type;
		typedef typename Container::iterator                           container_iterator;
		typedef typename Container::const_iterator                     container_const_iterator;
		typedef ring_buffer_iterator<T, T*, T&, Container>             iterator;
		typedef ring_buffer_iterator<T, const T*, const T&, Container> const_iterator;
		typedef eastl::reverse_iterator<iterator>                      reverse_iterator;
		typedef eastl::reverse_iterator<const_iterator>                const_reverse_iterator;

	public:                             // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness.
		Container          c;           // We follow the naming convention established for stack, queue, priority_queue and name this 'c'. This variable must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.

	protected:
		container_iterator mBegin;      // We keep track of where our begin and end are by using Container iterators.
		container_iterator mEnd;
		size_type          mSize;       // The number of elements currently in the buffer (the unused terminating element is not counted).

	public:
		// There currently isn't a ring_buffer constructor that specifies an initial size, unlike other containers.
		explicit ring_buffer(size_type cap = 0);                                // Construct with an initial capacity (but size of 0).
		explicit ring_buffer(size_type cap, const allocator_type& allocator);
		explicit ring_buffer(const Container& x);
		explicit ring_buffer(const allocator_type& allocator);
		ring_buffer(const this_type& x);
		ring_buffer(this_type&& x);
		ring_buffer(this_type&& x, const allocator_type& allocator);
		ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator = EASTL_RING_BUFFER_DEFAULT_ALLOCATOR); // This function sets the capacity to be equal to the size of the initializer list.

		// No destructor necessary. Default will do.

		this_type& operator=(const this_type& x);
		this_type& operator=(std::initializer_list<value_type> ilist);
		this_type& operator=(this_type&& x);

		// Replaces the contents with copies of the range [first, last).
		template <typename InputIterator>
		void assign(InputIterator first, InputIterator last);

		void swap(this_type& x);

		// Iteration proceeds from the ring buffer's begin position to its end
		// position, wrapping around the underlying container as needed.
		iterator               begin()          EA_NOEXCEPT;
		const_iterator         begin()    const EA_NOEXCEPT;
		const_iterator         cbegin()   const EA_NOEXCEPT;

		iterator               end()            EA_NOEXCEPT;
		const_iterator         end()      const EA_NOEXCEPT;
		const_iterator         cend()     const EA_NOEXCEPT;

		reverse_iterator       rbegin()         EA_NOEXCEPT;
		const_reverse_iterator rbegin()   const EA_NOEXCEPT;
		const_reverse_iterator crbegin()  const EA_NOEXCEPT;

		reverse_iterator       rend()           EA_NOEXCEPT;
		const_reverse_iterator rend()     const EA_NOEXCEPT;
		const_reverse_iterator crend()    const EA_NOEXCEPT;

		bool                   empty()    const EA_NOEXCEPT;
		bool                   full()     const EA_NOEXCEPT;   // true when another push would overwrite the oldest element (see class docs: end + 1 == begin).
		size_type              size()     const EA_NOEXCEPT;
		size_type              capacity() const EA_NOEXCEPT;

		void                   resize(size_type n);
		void                   set_capacity(size_type n); // Sets the capacity to the given value, including values less than the current capacity. Adjusts the size downward if n < size, by throwing out the oldest elements in the buffer.
		void                   reserve(size_type n);      // Reserve a given capacity. Doesn't decrease the capacity; it only increases it (for compatibility with other containers' behavior).

		reference       front();
		const_reference front() const;

		reference       back();
		const_reference back() const;

		// The no-argument overloads return a reference to the newly written slot,
		// avoiding construction of a temporary value_type (see class usage example).
		void            push_back(const value_type& value);
		reference       push_back();

		void            push_front(const value_type& value);
		reference       push_front();

		void            pop_back();
		void            pop_front();

		reference       operator[](size_type n);
		const_reference operator[](size_type n) const;

		// To consider:
		// size_type read(value_type* pDestination, size_type nCount);
		// size_type read(iterator** pPosition1, iterator** pPosition2, size_type& nCount1, size_type& nCount2);

		/* To do:
			template <class... Args>
			void emplace_front(Args&&... args);

			template <class... Args>
			void emplace_back(Args&&... args);

			template <class... Args>
			iterator emplace(const_iterator position, Args&&... args);
		*/

		iterator insert(const_iterator position, const value_type& value);
		void     insert(const_iterator position, size_type n, const value_type& value);
		void     insert(const_iterator position, std::initializer_list<value_type> ilist);

		template <typename InputIterator>
		void insert(const_iterator position, InputIterator first, InputIterator last);

		iterator         erase(const_iterator position);
		iterator         erase(const_iterator first, const_iterator last);
		reverse_iterator erase(const_reverse_iterator position);
		reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);

		void clear();

		// Direct access to the underlying container, e.g. for pre-reserving
		// element storage (see the class usage example above).
		container_type&       get_container();
		const container_type& get_container() const;

		bool validate() const;
		int  validate_iterator(const_iterator i) const;

	protected:
		//size_type DoGetSize(EASTL_ITC_NS::input_iterator_tag) const;
		//size_type DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const;

	}; // class ring_buffer
336 
337 
338 
339 
340 	///////////////////////////////////////////////////////////////////////
341 	// ring_buffer_iterator
342 	///////////////////////////////////////////////////////////////////////
343 
344 	template <typename T, typename Pointer, typename Reference, typename Container>
ring_buffer_iterator()345 	ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator()
346 		: mpContainer(NULL), mContainerIterator()
347 	{
348 	}
349 
350 
351 	template <typename T, typename Pointer, typename Reference, typename Container>
ring_buffer_iterator(Container * pContainer,const container_iterator & containerIterator)352 	ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator)
353 		: mpContainer(pContainer), mContainerIterator(containerIterator)
354 	{
355 	}
356 
357 
358 	template <typename T, typename Pointer, typename Reference, typename Container>
ring_buffer_iterator(const iterator & x)359 	ring_buffer_iterator<T, Pointer, Reference, Container>::ring_buffer_iterator(const iterator& x)
360 		: mpContainer(x.mpContainer), mContainerIterator(x.mContainerIterator)
361 	{
362 	}
363 
364 
365 	template <typename T, typename Pointer, typename Reference, typename Container>
366 	ring_buffer_iterator<T, Pointer, Reference, Container>&
367 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator=(const iterator& x)
368 	{
369 		mpContainer        = x.mpContainer;
370 		mContainerIterator = x.mContainerIterator;
371 		return *this;
372 	}
373 
	template <typename T, typename Pointer, typename Reference, typename Container>
	typename ring_buffer_iterator<T, Pointer, Reference, Container>::reference
	ring_buffer_iterator<T, Pointer, Reference, Container>::operator*() const
	{
		// Dereference forwards directly to the underlying container iterator.
		return *mContainerIterator;
	}
380 
381 
	template <typename T, typename Pointer, typename Reference, typename Container>
	typename ring_buffer_iterator<T, Pointer, Reference, Container>::pointer
	ring_buffer_iterator<T, Pointer, Reference, Container>::operator->() const
	{
		// &* obtains a raw pointer to the element the container iterator refers to.
		return &*mContainerIterator;
	}
388 
389 
390 	template <typename T, typename Pointer, typename Reference, typename Container>
391 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
392 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator++()
393 	{
394 		if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
395 			mContainerIterator = mpContainer->begin();
396 		return *this;
397 	}
398 
399 
400 	template <typename T, typename Pointer, typename Reference, typename Container>
401 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
402 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator++(int)
403 	{
404 		const this_type temp(*this);
405 		if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end()))
406 			mContainerIterator = mpContainer->begin();
407 		return temp;
408 	}
409 
410 
	template <typename T, typename Pointer, typename Reference, typename Container>
	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
	ring_buffer_iterator<T, Pointer, Reference, Container>::operator--()
	{
		// Step backward one element. If we are at the front of the underlying
		// container, first wrap to its end so the decrement lands on the last element.
		if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
			mContainerIterator = mpContainer->end();
		--mContainerIterator;
		return *this;
	}
420 
421 
422 	template <typename T, typename Pointer, typename Reference, typename Container>
423 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
424 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator--(int)
425 	{
426 		const this_type temp(*this);
427 		if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin()))
428 			mContainerIterator = mpContainer->end();
429 		--mContainerIterator;
430 		return temp;
431 	}
432 
433 
434 	template <typename T, typename Pointer, typename Reference, typename Container>
435 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
436 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator+=(difference_type n)
437 	{
438 		typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
439 		increment(n, IC());
440 		return *this;
441 	}
442 
443 
444 	template <typename T, typename Pointer, typename Reference, typename Container>
445 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type&
446 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator-=(difference_type n)
447 	{
448 		typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
449 		increment(-n, IC());
450 		return *this;
451 	}
452 
453 
454 	template <typename T, typename Pointer, typename Reference, typename Container>
455 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
456 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator+(difference_type n) const
457 	{
458 		return this_type(*this).operator+=(n);
459 	}
460 
461 
462 	template <typename T, typename Pointer, typename Reference, typename Container>
463 	typename ring_buffer_iterator<T, Pointer, Reference, Container>::this_type
464 	ring_buffer_iterator<T, Pointer, Reference, Container>::operator-(difference_type n) const
465 	{
466 		return this_type(*this).operator+=(-n);
467 	}
468 
469 
470 	template <typename T, typename Pointer, typename Reference, typename Container>
increment(difference_type n,EASTL_ITC_NS::input_iterator_tag)471 	void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::input_iterator_tag)
472 	{
473 		// n cannot be negative, as input iterators don't support reverse iteration.
474 		while(n-- > 0)
475 			operator++();
476 	}
477 
478 
	template <typename T, typename Pointer, typename Reference, typename Container>
	void ring_buffer_iterator<T, Pointer, Reference, Container>::increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag)
	{
		// O(1) advancement for random access container iterators.
		// We make the assumption here that the user is incrementing from a valid
		// starting position to a valid ending position. Thus *this + n yields a
		// valid iterator, including if n happens to be a negative value.

		if(n >= 0)
		{
			// d is the (non-negative) distance from the current position to the container's end.
			const difference_type d = mpContainer->end() - mContainerIterator;

			if(n < d)
				mContainerIterator += n; // Destination is before end(); no wraparound needed.
			else
				mContainerIterator = mpContainer->begin() + (n - d); // Wrap past end() to the front.
		}
		else
		{
			// Recall that n and d here will be negative and so the logic here works as intended.
			const difference_type d = mpContainer->begin() - mContainerIterator;

			if(n >= d)
				mContainerIterator += n; // Destination is at or after begin(); no wraparound needed.
			else
				mContainerIterator = mpContainer->end() + (n - d); // Wrap past begin() to the back.
		}
	}
506 
507 
508 	// Random access iterators must support operator + and operator -.
509 	// You can only add an integer to an iterator, and you cannot add two iterators.
	template <typename T, typename Pointer, typename Reference, typename Container>
	inline ring_buffer_iterator<T, Pointer, Reference, Container>
	operator+(ptrdiff_t n, const ring_buffer_iterator<T, Pointer, Reference, Container>& x)
	{
		// Enables the (n + iterator) spelling required of random access iterators.
		return x + n; // Implement (n + x) in terms of (x + n).
	}
516 
517 
518 	// You can only add an integer to an iterator, but you can subtract two iterators.
	template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB, typename Container>
	inline typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type
	operator-(const ring_buffer_iterator<T, PointerA, ReferenceA, Container>& a,
			  const ring_buffer_iterator<T, PointerB, ReferenceB, Container>& b)
	{
		typedef typename ring_buffer_iterator<T, PointerA, ReferenceA, Container>::difference_type difference_type;

		// To do: If container_iterator is a random access iterator, then do a simple calculation.
		// Otherwise, we have little choice but to iterate from a to b and count as we go.
		// See the ring_buffer::size function for an implementation of this.

		// Iteration implementation: O(n) walk from b forward until we reach a.
		// NOTE(review): this assumes a and b refer to the same underlying container
		// and that a is reachable from b by forward iteration; otherwise the loop
		// would not terminate. Confirm against callers.
		difference_type d = 0;

		for(ring_buffer_iterator<T, PointerA, ReferenceA, Container> temp(b); temp != a; ++temp)
			++d;

		return d;
	}
538 
539 
540 	// The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
	// Thus we provide additional template parameters here to support this. The defect report does not
542 	// require us to support comparisons between reverse_iterators and const_reverse_iterators.
	template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
						  typename PointerB, typename ReferenceB, typename ContainerB>
	inline bool operator==(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
						   const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
	{
		// Equality is defined solely by the underlying container position.
		// Perhaps we should compare the container pointer as well.
		// However, for valid iterators this shouldn't be necessary.
		return a.mContainerIterator == b.mContainerIterator;
	}
552 
553 
	template <typename T, typename PointerA, typename ReferenceA, typename ContainerA,
						  typename PointerB, typename ReferenceB, typename ContainerB>
	inline bool operator!=(const ring_buffer_iterator<T, PointerA, ReferenceA, ContainerA>& a,
						   const ring_buffer_iterator<T, PointerB, ReferenceB, ContainerB>& b)
	{
		// Defined via ==, so only operator== is required of the container iterator.
		// Perhaps we should compare the container pointer as well.
		// However, for valid iterators this shouldn't be necessary.
		return !(a.mContainerIterator == b.mContainerIterator);
	}
563 
564 
565 	// We provide a version of operator!= for the case where the iterators are of the
566 	// same type. This helps prevent ambiguity errors in the presence of rel_ops.
	template <typename T, typename Pointer, typename Reference, typename Container>
	inline bool operator!=(const ring_buffer_iterator<T, Pointer, Reference, Container>& a,
						   const ring_buffer_iterator<T, Pointer, Reference, Container>& b)
	{
		// Same-type overload; defined via == just like the heterogeneous overload.
		return !(a.mContainerIterator == b.mContainerIterator);
	}
573 
574 
575 
576 
577 	///////////////////////////////////////////////////////////////////////
578 	// ring_buffer
579 	///////////////////////////////////////////////////////////////////////
580 
581 	template <typename T, typename Container, typename Allocator>
ring_buffer(size_type cap)582 	ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap)
583 		: c() // Default construction with default allocator for the container.
584 	{
585 		// To do: This code needs to be amended to deal with possible exceptions
586 		// that could occur during the resize call below.
587 
588 		// We add one because the element at mEnd is necessarily unused.
589 		c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
590 		mBegin = c.begin();
591 		mEnd   = mBegin;
592 		mSize  = 0;
593 	}
594 
595 
596 	template <typename T, typename Container, typename Allocator>
ring_buffer(size_type cap,const allocator_type & allocator)597 	ring_buffer<T, Container, Allocator>::ring_buffer(size_type cap, const allocator_type& allocator)
598 		: c(allocator)
599 	{
600 		// To do: This code needs to be amended to deal with possible exceptions
601 		// that could occur during the resize call below.
602 
603 		// We add one because the element at mEnd is necessarily unused.
604 		c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
605 		mBegin = c.begin();
606 		mEnd   = mBegin;
607 		mSize  = 0;
608 	}
609 
610 
611 	template <typename T, typename Container, typename Allocator>
ring_buffer(const Container & x)612 	ring_buffer<T, Container, Allocator>::ring_buffer(const Container& x)
613 		: c(x) // This copies elements from x, but unless the user is doing some tricks, the only thing that matters is that c.size() == x.size().
614 	{
615 		// To do: This code needs to be amended to deal with possible exceptions
616 		// that could occur during the resize call below.
617 		if(c.empty())
618 			c.resize(1);
619 		mBegin = c.begin();
620 		mEnd   = mBegin;
621 		mSize  = 0;
622 	}
623 
624 
625 	template <typename T, typename Container, typename Allocator>
ring_buffer(const allocator_type & allocator)626 	ring_buffer<T, Container, Allocator>::ring_buffer(const allocator_type& allocator)
627 		: c(allocator)
628 	{
629 		// To do: This code needs to be amended to deal with possible exceptions
630 		// that could occur during the resize call below.
631 
632 		// We add one because the element at mEnd is necessarily unused.
633 		c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
634 		mBegin = c.begin();
635 		mEnd   = mBegin;
636 		mSize  = 0;
637 	}
638 
639 
	template <typename T, typename Container, typename Allocator>
	ring_buffer<T, Container, Allocator>::ring_buffer(const this_type& x)
		: c(x.c)
	{
		mBegin = c.begin();
		mEnd   = mBegin;
		mSize  = x.mSize;

		// Re-derive our begin/end iterators at the same offsets that x's occupy
		// within x.c. The const_cast exists only to obtain a non-const begin()
		// whose type matches x.mBegin/x.mEnd for the distance computation.
		eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
		eastl::advance(mEnd,   eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
	}
651 
	template <typename T, typename Container, typename Allocator>
	ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x)
	: c() // Default construction with default allocator for the container.
	{
		// First establish a valid minimal state (one unused terminating element),
		// then take x's state via swap.
		c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
		mBegin = c.begin();
		mEnd   = mBegin;
		mSize  = 0;

		swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible.
	}
663 
	template <typename T, typename Container, typename Allocator>
	ring_buffer<T, Container, Allocator>::ring_buffer(this_type&& x, const allocator_type& allocator)
		: c(allocator)
	{
		// Establish a valid minimal state before deciding how to take x's contents.
		c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function.
		mBegin = c.begin();
		mEnd   = mBegin;
		mSize  = 0;

		if(c.get_allocator() == x.c.get_allocator())
			swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible.
		else
			operator=(x); // Allocators differ, so we can't steal x's memory. x binds as an lvalue here, so this invokes the copy-assignment operator.
	}
678 
679 
680 	template <typename T, typename Container, typename Allocator>
ring_buffer(std::initializer_list<value_type> ilist,const allocator_type & allocator)681 	ring_buffer<T, Container, Allocator>::ring_buffer(std::initializer_list<value_type> ilist, const allocator_type& allocator)
682 		: c(allocator)
683 	{
684 		c.resize((eastl_size_t)ilist.size() + 1);
685 		mBegin = c.begin();
686 		mEnd   = mBegin;
687 		mSize  = 0;
688 
689 		assign(ilist.begin(), ilist.end());
690 	}
691 
692 
	/// Copy-assignment.
	/// Copies the underlying container wholesale, then recomputes mBegin/mEnd as
	/// offsets from the (new) container's begin, mirroring x's offsets. Simple
	/// distance/advance is valid because offsets within one container never wrap.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::this_type&
	ring_buffer<T, Container, Allocator>::operator=(const this_type& x)
	{
		if(&x != this)
		{
			c = x.c;

			mBegin = c.begin();
			mEnd   = mBegin;
			mSize  = x.mSize;

			eastl::advance(mBegin, eastl::distance(const_cast<this_type&>(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound.
			eastl::advance(mEnd,   eastl::distance(const_cast<this_type&>(x).c.begin(), x.mEnd));
		}

		return *this;
	}
711 
712 
	/// Move-assignment, implemented as a swap: we take x's state and x receives
	/// ours (so x stays destructible, holding our former contents).
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::this_type&
	ring_buffer<T, Container, Allocator>::operator=(this_type&& x)
	{
		swap(x);
		return *this;
	}
720 
721 
	/// Assigns from an initializer_list. Note that unlike the initializer_list
	/// constructor, this does not resize capacity; if ilist.size() exceeds the
	/// current capacity, earlier elements are overwritten (push_back semantics).
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::this_type&
	ring_buffer<T, Container, Allocator>::operator=(std::initializer_list<value_type> ilist)
	{
		assign(ilist.begin(), ilist.end());
		return *this;
	}
729 
730 
731 	template <typename T, typename Container, typename Allocator>
732 	template <typename InputIterator>
assign(InputIterator first,InputIterator last)733 	void ring_buffer<T, Container, Allocator>::assign(InputIterator first, InputIterator last)
734 	{
735 		// To consider: We can make specializations of this for pointer-based
736 		// iterators to PODs and turn the action into a memcpy.
737 		clear();
738 
739 		for(; first != last; ++first)
740 			push_back(*first);
741 	}
742 
743 
	/// Swaps the contents of two ring_buffers.
	/// Because mBegin/mEnd point into the containers being swapped, we record
	/// each side's begin/end as plain offsets first, swap the containers, and
	/// then rebuild the iterators from the other side's recorded offsets.
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::swap(this_type& x)
	{
		if(&x != this)
		{
			const difference_type dBegin  = eastl::distance(c.begin(), mBegin); // We can do a simple distance algorithm here, as there will be no wraparound.
			const difference_type dEnd    = eastl::distance(c.begin(), mEnd);

			const difference_type dxBegin = eastl::distance(x.c.begin(), x.mBegin);
			const difference_type dxEnd   = eastl::distance(x.c.begin(), x.mEnd);

			eastl::swap(c, x.c);
			eastl::swap(mSize, x.mSize);

			mBegin = c.begin();
			eastl::advance(mBegin, dxBegin); // We can do a simple advance algorithm here, as there will be no wraparound.

			mEnd = c.begin();
			eastl::advance(mEnd, dxEnd);

			x.mBegin = x.c.begin();
			eastl::advance(x.mBegin, dBegin);

			x.mEnd = x.c.begin();
			eastl::advance(x.mEnd, dEnd);
		}
	}
771 
772 
	/// Returns an iterator to the oldest (front) element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::iterator
	ring_buffer<T, Container, Allocator>::begin() EA_NOEXCEPT
	{
		return iterator(&c, mBegin);
	}
779 
780 
	/// Returns a const_iterator to the oldest (front) element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_iterator
	ring_buffer<T, Container, Allocator>::begin() const EA_NOEXCEPT
	{
		return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
	}
787 
788 
	/// Returns a const_iterator to the oldest (front) element; same as const begin().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_iterator
	ring_buffer<T, Container, Allocator>::cbegin() const EA_NOEXCEPT
	{
		return const_iterator(const_cast<Container*>(&c), mBegin); // We trust that the const_iterator will respect const-ness.
	}
795 
796 
	/// Returns an iterator to one-past-the-newest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::iterator
	ring_buffer<T, Container, Allocator>::end() EA_NOEXCEPT
	{
		return iterator(&c, mEnd);
	}
803 
804 
	/// Returns a const_iterator to one-past-the-newest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_iterator
	ring_buffer<T, Container, Allocator>::end() const EA_NOEXCEPT
	{
		return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
	}
811 
812 
	/// Returns a const_iterator to one-past-the-newest element; same as const end().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_iterator
	ring_buffer<T, Container, Allocator>::cend() const EA_NOEXCEPT
	{
		return const_iterator(const_cast<Container*>(&c), mEnd); // We trust that the const_iterator will respect const-ness.
	}
819 
820 
	/// Returns a reverse_iterator positioned at the newest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reverse_iterator
	ring_buffer<T, Container, Allocator>::rbegin() EA_NOEXCEPT
	{
		return reverse_iterator(iterator(&c, mEnd));
	}
827 
828 
	/// Returns a const_reverse_iterator positioned at the newest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
	ring_buffer<T, Container, Allocator>::rbegin() const EA_NOEXCEPT
	{
		return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
	}
835 
836 
	/// Returns a const_reverse_iterator positioned at the newest element; same as const rbegin().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
	ring_buffer<T, Container, Allocator>::crbegin() const EA_NOEXCEPT
	{
		return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mEnd));
	}
843 
844 
	/// Returns a reverse_iterator positioned one-before-the-oldest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reverse_iterator
	ring_buffer<T, Container, Allocator>::rend() EA_NOEXCEPT
	{
		return reverse_iterator(iterator(&c, mBegin));
	}
851 
852 
	/// Returns a const_reverse_iterator positioned one-before-the-oldest element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
	ring_buffer<T, Container, Allocator>::rend() const EA_NOEXCEPT
	{
		return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
	}
859 
860 
	/// Returns a const_reverse_iterator positioned one-before-the-oldest element; same as const rend().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reverse_iterator
	ring_buffer<T, Container, Allocator>::crend() const EA_NOEXCEPT
	{
		return const_reverse_iterator(const_iterator(const_cast<Container*>(&c), mBegin));
	}
867 
868 
	/// Returns true if the ring_buffer holds no elements.
	/// mBegin == mEnd is the class invariant for emptiness (equivalent to mSize == 0).
	template <typename T, typename Container, typename Allocator>
	bool ring_buffer<T, Container, Allocator>::empty() const EA_NOEXCEPT
	{
		return mBegin == mEnd;
	}
874 
875 
	/// Returns true if the ring_buffer is at capacity (the next push will evict).
	/// Full means the slot after mEnd (with wraparound) is mBegin, because one
	/// container slot is always kept unused as the terminating position.
	template <typename T, typename Container, typename Allocator>
	bool ring_buffer<T, Container, Allocator>::full() const EA_NOEXCEPT
	{
		// Implementation that relies on c.size() being a fast operation:
		// return mSize == (c.size() - 1); // (c.size() - 1) == capacity(); we are attempting to reduce function calls.

		// Version that has constant speed guarantees, but is still pretty fast.
		const_iterator afterEnd(end());
		++afterEnd; // ring_buffer iterator increment wraps around the container.
		return afterEnd.mContainerIterator == mBegin;
	}
887 
888 
	/// Returns the number of elements currently stored. O(1): the count is
	/// maintained incrementally in mSize by the mutating operations.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::size_type
	ring_buffer<T, Container, Allocator>::size() const EA_NOEXCEPT
	{
		return mSize;

		// Alternatives:
		// return eastl::distance(begin(), end());
		// return end() - begin(); // This is more direct than using distance().
		//typedef typename eastl::iterator_traits<container_iterator>::iterator_category IC;
		//return DoGetSize(IC()); // This is more direct than using iterator math.
	}
901 
902 
903 	/*
904 	template <typename T, typename Container, typename Allocator>
905 	typename ring_buffer<T, Container, Allocator>::size_type
906 	ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::input_iterator_tag) const
907 	{
908 		// We could alternatively just use eastl::distance() here, but we happen to
909 		// know that such code would boil down to what we have here, and we might
910 		// as well remove function calls where possible.
911 		difference_type d = 0;
912 
913 		for(const_iterator temp(begin()), tempEnd(end()); temp != tempEnd; ++temp)
914 			++d;
915 
916 		return (size_type)d;
917 	}
918 	*/
919 
920 	/*
921 	template <typename T, typename Container, typename Allocator>
922 	typename ring_buffer<T, Container, Allocator>::size_type
923 	ring_buffer<T, Container, Allocator>::DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const
924 	{
	// A simpler but less efficient implementation of this function would be:
926 		//     return eastl::distance(mBegin, mEnd);
927 		//
928 		// The calculation of distance here takes advantage of the fact that random
929 		// access iterators' distances can be calculated by simple pointer calculation.
930 		// Thus the code below boils down to a few subtractions when using a vector,
931 		// string, or array as the Container type.
932 		//
933 		const difference_type dBegin = eastl::distance(const_cast<Container&>(c).begin(), mBegin); // const_cast here solves a little compiler
934 		const difference_type dEnd   = eastl::distance(const_cast<Container&>(c).begin(), mEnd);   // argument matching problem.
935 
936 		if(dEnd >= dBegin)
937 			return dEnd - dBegin;
938 
939 		return c.size() - (dBegin - dEnd);
940 	}
941 	*/
942 
943 
	namespace Internal
	{
		///////////////////////////////////////////////////////////////
		// has_overflow_allocator
		//
		// returns true_type when the specified container type is an
		// eastl::fixed_*  container and therefore has an overflow
		// allocator type.
		//
		// Detection idiom: the partial specialization is selected only when
		// T has a callable get_overflow_allocator() member.
		//
		template <typename T, typename = void>
		struct has_overflow_allocator : false_type {};

		template <typename T>
		struct has_overflow_allocator<T, void_t<decltype(declval<T>().get_overflow_allocator())>> : true_type {};


		///////////////////////////////////////////////////////////////
		// GetFixedContainerCtorAllocator
		//
		// eastl::fixed_* containers are only constructible via their
		// overflow allocator type. This helper select the appropriate
		// allocator from the specified container.
		//
		// Note: has_overflow_allocator<Container>()() default-constructs the
		// trait and invokes its operator(), yielding the bool value.
		//
		template <typename Container, bool UseOverflowAllocator = has_overflow_allocator<Container>()()>
		struct GetFixedContainerCtorAllocator
		{
			auto& operator()(Container& c) { return c.get_overflow_allocator(); }
		};

		template <typename Container>
		struct GetFixedContainerCtorAllocator<Container, false>
		{
			auto& operator()(Container& c) { return c.get_allocator(); }
		};
	} // namespace Internal
979 
980 
981 	///////////////////////////////////////////////////////////////
982 	// ContainerTemporary
983 	//
984 	// Helper type which prevents utilizing excessive stack space
985 	// when creating temporaries when swapping/copying the underlying
986 	// ring_buffer container type.
987 	//
	// Primary (stack) variant: used when sizeof(Container) is small enough to
	// live on the stack. The temporary is constructed with the parent's
	// allocator (or its overflow allocator for eastl::fixed_* containers).
	template <typename Container, bool UseHeapTemporary = (sizeof(Container) >= EASTL_MAX_STACK_USAGE)>
	struct ContainerTemporary
	{
		Container mContainer;

		ContainerTemporary(Container& parentContainer)
		    : mContainer(Internal::GetFixedContainerCtorAllocator<Container>{}(parentContainer))
		{
		}

		Container& get() { return mContainer; }
	};
1000 
1001 	template <typename Container>
1002 	struct ContainerTemporary<Container, true>
1003 	{
1004 		typename Container::allocator_type* mAllocator;
1005 		Container* mContainer;
1006 
1007 		ContainerTemporary(Container& parentContainer)
1008 		    : mAllocator(&parentContainer.get_allocator())
1009 		    , mContainer(new (mAllocator->allocate(sizeof(Container))) Container)
1010 		{
1011 		}
1012 
1013 		~ContainerTemporary()
1014 		{
1015 			mContainer->~Container();
1016 			mAllocator->deallocate(mContainer, sizeof(Container));
1017 		}
1018 
1019 		Container& get() { return *mContainer; }
1020 	};
1021 
1022 
	/// Resizes the logical element count to n.
	/// Growing beyond capacity reallocates (via a temporary container); growing
	/// within capacity simply advances mEnd, exposing stale past values (by design).
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::resize(size_type n)
	{
		// Note that if n > size(), we just move the end position out to
		// the begin + n, with the data being the old end and the new end
		// being stale values from the past. This is by design, as the concept
		// of arbitrarily resizing a ring buffer like this is currently deemed
		// to be vague in what it intends to do. We can only assume that the
		// user knows what he is doing and will deal with the stale values.
		EASTL_ASSERT(c.size() >= 1);
		const size_type cap = (c.size() - 1);

		mSize = n;

		if(n > cap) // If we need to grow in capacity...
		{
			// Given that a growing operation will always result in memory allocation,
			// we currently implement this function via the usage of a temp container.
			// This makes for a simple implementation, but in some cases it is less
			// efficient. In particular, if the container is a node-based container like
			// a (linked) list, this function would be faster if we simply added nodes
			// to ourself. We would do this by inserting the nodes to be after end()
			// and adjusting the begin() position if it was after end().

			// To do: This code needs to be amended to deal with possible exceptions
			// that could occur during the resize call below.

			ContainerTemporary<Container> cTemp(c);
			cTemp.get().resize(n + 1);
			eastl::copy(begin(), end(), cTemp.get().begin());
			eastl::swap(c, cTemp.get());

			mBegin = c.begin();
			mEnd   = mBegin;
			eastl::advance(mEnd, n); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
		}
		else // We could do a check here for n != size(), but that would be costly and people don't usually resize things to their same size.
		{
			mEnd = mBegin;

			// eastl::advance(mEnd, n); // We *cannot* use this because there may be wraparound involved.

			// To consider: Possibly we should implement some more detailed logic to optimize the code here.
			// We'd need to do different behaviour depending on whether the container iterator type is a
			// random access iterator or otherwise.

			while(n--)
			{
				if(EASTL_UNLIKELY(++mEnd == c.end()))
					mEnd = c.begin();
			}
		}
	}
1076 
1077 
	/// Returns the maximum number of elements the ring_buffer can hold without
	/// evicting, which is one less than the underlying container's size.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::size_type
	ring_buffer<T, Container, Allocator>::capacity() const EA_NOEXCEPT
	{
		EASTL_ASSERT(c.size() >= 1); // This is required because even an empty ring_buffer has one unused termination element, somewhat like a \0 at the end of a C string.

		return (c.size() - 1); // Need to subtract one because the position at mEnd is unused.
	}
1086 
1087 
1088 	template <typename T, typename Container, typename Allocator>
1089 	void ring_buffer<T, Container, Allocator>::set_capacity(size_type n)
1090 	{
1091 		const size_type capacity = (c.size() - 1);
1092 
1093 		if(n != capacity)    // If we need to change capacity...
1094 		{
1095 			ContainerTemporary<Container> cTemp(c);
1096 			cTemp.get().resize(n + 1);
1097 
1098 			iterator itCopyBegin = begin();
1099 
1100 			if(n < mSize) // If we are shrinking the capacity, to less than our size...
1101 			{
1102 				eastl::advance(itCopyBegin, mSize - n);
1103 				mSize = n;
1104 			}
1105 
1106 			eastl::copy(itCopyBegin, end(), cTemp.get().begin());  // The begin-end range may in fact be larger than n, in which case values will be overwritten.
1107 			eastl::swap(c, cTemp.get());
1108 
1109 			mBegin = c.begin();
1110 			mEnd   = mBegin;
1111 			eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
1112 		}
1113 	}
1114 
1115 
	/// Grows capacity to at least n; does nothing if n <= capacity()
	/// (same policy as eastl::vector::reserve — never shrinks).
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::reserve(size_type n)
	{
		// We follow the pattern of vector and only do something if n > capacity.
		EASTL_ASSERT(c.size() >= 1);

		if(n > (c.size() - 1))    // If we need to grow in capacity... // (c.size() - 1) == capacity(); we are attempting to reduce function calls.
		{
			ContainerTemporary<Container> cTemp(c);
			cTemp.get().resize(n + 1);
			eastl::copy(begin(), end(), cTemp.get().begin());
			eastl::swap(c, cTemp.get());

			mBegin = c.begin();
			mEnd   = mBegin;
			eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around.
		}
	}
1134 
1135 
	/// Returns a reference to the oldest element. Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reference
	ring_buffer<T, Container, Allocator>::front()
	{
		return *mBegin;
	}
1142 
1143 
	/// Returns a const reference to the oldest element. Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reference
	ring_buffer<T, Container, Allocator>::front() const
	{
		return *mBegin;
	}
1150 
1151 
	/// Returns a reference to the newest element. Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reference
	ring_buffer<T, Container, Allocator>::back()
	{
		// return *(end() - 1); // Can't use this because not all iterators support operator-.

		iterator temp(end());   // To do: Find a way to construct this temporary in the return statement.
		return *(--temp);       // We can do it by making all our containers' iterators support operator-.
	}
1161 
1162 
	/// Returns a const reference to the newest element. Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reference
	ring_buffer<T, Container, Allocator>::back() const
	{
		// return *(end() - 1); // Can't use this because not all iterators support operator-.

		const_iterator temp(end()); // To do: Find a way to construct this temporary in the return statement.
		return *(--temp);           // We can do it by making all our containers' iterators support operator-.
	}
1172 
1173 
	/// A push_back operation on a ring buffer assigns the new value to end.
	/// If there is no more space in the buffer, this will result in begin
	/// being overwritten and the begin position being moved forward one position.
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::push_back(const value_type& value)
	{
		// Write into the always-unused slot at mEnd, then advance mEnd with wraparound.
		*mEnd = value;

		if(++mEnd == c.end())
			mEnd = c.begin();

		if(mEnd == mBegin)
		{
			// Buffer was full: the oldest element was just overwritten, so advance
			// mBegin (with wraparound); mSize stays at capacity.
			if(++mBegin == c.end())
				mBegin = c.begin();
		}
		else
			++mSize;
	}
1193 
1194 
	/// Appends a (not re-initialized) element slot and returns a reference to it.
	/// The caller is expected to assign the returned reference. If the buffer is
	/// full, begin is overwritten and the begin position moves forward one position.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reference
	ring_buffer<T, Container, Allocator>::push_back()
	{
		// We don't do the following assignment, as the value at mEnd is already constructed;
		// it is merely possibly not default-constructed. However, the spirit of push_back
		// is that the user intends to do an assignment or data modification after the
		// push_back call. The user can always execute *back() = value_type() if he wants.
		//*mEnd = value_type();

		if(++mEnd == c.end())
			mEnd = c.begin();

		if(mEnd == mBegin)
		{
			// Buffer was full: drop the oldest element by advancing mBegin.
			if(++mBegin == c.end())
				mBegin = c.begin();
		}
		else
			++mSize;

		return back();
	}
1221 
1222 
	/// Removes the newest element by retreating mEnd (with wraparound).
	/// The element itself is not destroyed; its slot simply becomes the unused one.
	/// Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::pop_back()
	{
		EASTL_ASSERT(mEnd != mBegin); // We assume that size() > 0 and thus that there is something to pop.

		if(EASTL_UNLIKELY(mEnd == c.begin()))
			mEnd = c.end();
		--mEnd;
		--mSize;
	}
1233 
1234 
	/// Prepends a value. If the buffer is full, the newest element (at back)
	/// is evicted by retreating mEnd one position.
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::push_front(const value_type& value)
	{
		// Retreat mBegin with wraparound to open a slot at the front.
		if(EASTL_UNLIKELY(mBegin == c.begin()))
			mBegin = c.end();

		if(--mBegin == mEnd)
		{
			// Buffer was full: retreat mEnd (with wraparound) to evict the newest element.
			if(EASTL_UNLIKELY(mEnd == c.begin()))
				mEnd = c.end();
			--mEnd;
		}
		else
			++mSize;

		*mBegin = value;
	}
1252 
1253 
	/// Prepends a (not re-initialized) element slot and returns a reference to it.
	/// The caller is expected to assign the returned reference. If the buffer is
	/// full, the newest element is evicted.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reference
	ring_buffer<T, Container, Allocator>::push_front()
	{
		if(EASTL_UNLIKELY(mBegin == c.begin()))
			mBegin = c.end();

		if(--mBegin == mEnd)
		{
			// Buffer was full: retreat mEnd (with wraparound) to evict the newest element.
			if(EASTL_UNLIKELY(mEnd == c.begin()))
				mEnd = c.end();
			--mEnd;
		}
		else
			++mSize;

		// See comments above in push_back for why we don't execute this:
		// *mBegin = value_type();

		return *mBegin; // Same as return front();
	}
1275 
1276 
	/// Removes the oldest element by advancing mBegin (with wraparound).
	/// The element itself is not destroyed. Precondition: !empty().
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::pop_front()
	{
		EASTL_ASSERT(mBegin != mEnd); // We assume that mEnd > mBegin and thus that there is something to pop.

		if(++mBegin == c.end())
			mBegin = c.begin();
		--mSize;
	}
1286 
1287 
	/// Returns a reference to the nth element counted from the front (oldest).
	/// O(1) only when the container iterator is random-access; otherwise O(n).
	/// Precondition: n < size().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reference
	ring_buffer<T, Container, Allocator>::operator[](size_type n)
	{
		// return *(begin() + n); // Can't use this because not all iterators support operator+.

		// This should compile to code that is nearly as efficient as that above.
		// The primary difference is the possible generation of a temporary in this case.
		iterator temp(begin());
		eastl::advance(temp, n); // The ring_buffer iterator handles wraparound.
		return *(temp.mContainerIterator);
	}
1300 
1301 
	/// Returns a const reference to the nth element counted from the front (oldest).
	/// Precondition: n < size().
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::const_reference
	ring_buffer<T, Container, Allocator>::operator[](size_type n) const
	{
		// return *(begin() + n); // Can't use this because not all iterators support operator+.

		// This should compile to code that is nearly as efficient as that above.
		// The primary difference is the possible generation of a temporary in this case.
		const_iterator temp(begin());
		eastl::advance(temp, n); // The ring_buffer iterator handles wraparound.
		return *(temp.mContainerIterator);
	}
1314 
1315 
	/// Inserts value before position, shifting [position, end) back by one.
	/// If the buffer is full, the last element is overwritten by the shift
	/// (nothing grows); otherwise size increases by one.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::iterator
	ring_buffer<T, Container, Allocator>::insert(const_iterator position, const value_type& value)
	{
		// To consider: It would be faster if we could tell that position was in the first
		// half of the container and instead of moving things after the position back,
		// we could move things before the position forward.

		iterator afterEnd(end());
		iterator beforeEnd(afterEnd);

		++afterEnd;

		if(afterEnd.mContainerIterator == mBegin) // If we are at full capacity...
			--beforeEnd;                          // Shift one element fewer; the last one gets overwritten.
		else
			push_back();                          // Open up one slot at the end.

		iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
		eastl::copy_backward(itPosition, beforeEnd, end());
		*itPosition = value;

		return itPosition;
	}
1340 
1341 
	/// Inserts n copies of value before position, one at a time (O(n * size)).
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::insert(const_iterator position, size_type n, const value_type& value)
	{
		// To do: This can be improved with a smarter version. However,
		// this is a little tricky because we need to deal with the case
		// whereby n is greater than the size of the container itself.
		while(n--)
			insert(position, value);
	}
1351 
1352 
	/// Inserts the initializer_list's elements before position, in order.
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::insert(const_iterator position, std::initializer_list<value_type> ilist)
	{
		insert(position, ilist.begin(), ilist.end());
	}
1358 
1359 
	/// Inserts the range [first, last) before position, one element at a time.
	/// position is incremented each step so the elements land in source order.
	template <typename T, typename Container, typename Allocator>
	template <typename InputIterator>
	void ring_buffer<T, Container, Allocator>::insert(const_iterator position, InputIterator first, InputIterator last)
	{
		// To do: This can possibly be improved with a smarter version.
		// However, this can be tricky if distance(first, last) is greater
		// than the size of the container itself.
		for(; first != last; ++first, ++position)
			insert(position, *first);
	}
1370 
1371 
	/// Erases the element at position by shifting (position, end) forward one
	/// slot and popping the (now duplicated) back element. Returns an iterator
	/// to the element that followed the erased one.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::iterator
	ring_buffer<T, Container, Allocator>::erase(const_iterator position)
	{
		iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator.
		iterator iNext(itPosition);

		eastl::copy(++iNext, end(), itPosition);
		pop_back();

		return itPosition;
	}
1384 
1385 
	/// Erases the range [first, last) by shifting the tail forward and popping
	/// the surplus elements from the back. Returns an iterator to the element
	/// that followed the erased range.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::iterator
	ring_buffer<T, Container, Allocator>::erase(const_iterator first, const_iterator last)
	{
		iterator itFirst(first.mpContainer, first.mContainerIterator); // We merely copy from const_iterator to iterator.
		iterator itLast(last.mpContainer, last.mContainerIterator);

		typename iterator::difference_type d = eastl::distance(itFirst, itLast);

		eastl::copy(itLast, end(), itFirst);

		while(d--)      // To do: improve this implementation.
			pop_back();

		return itFirst;
	}
1402 
1403 
	/// Erases the element a reverse_iterator refers to. (++position).base()
	/// converts the reverse position into the forward iterator of that element.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reverse_iterator
	ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator position)
	{
		return reverse_iterator(erase((++position).base()));
	}
1410 
1411 
	/// Erases a reverse range. The reverse [first, last) corresponds to the
	/// forward range [(++last).base(), (++first).base()), which is erased in one call.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::reverse_iterator
	ring_buffer<T, Container, Allocator>::erase(const_reverse_iterator first, const_reverse_iterator last)
	{
		// Version which erases in order from first to last.
		// difference_type i(first.base() - last.base());
		// while(i--)
		//     first = erase(first);
		// return first;

		// Version which erases in order from last to first, but is slightly more efficient:
		return reverse_iterator(erase((++last).base(), (++first).base()));
	}
1425 
1426 
	/// Empties the ring_buffer by resetting the pointers and count. The stored
	/// elements are not destroyed or cleared; their slots are simply reused later.
	template <typename T, typename Container, typename Allocator>
	void ring_buffer<T, Container, Allocator>::clear()
	{
		// Don't clear the container; we use its valid data for our elements.
		mBegin = c.begin();
		mEnd   = c.begin();
		mSize  = 0;
	}
1435 
1436 
	/// Returns the underlying container. Mutating it directly can break the
	/// ring_buffer's iterator invariants; use with care.
	template <typename T, typename Container, typename Allocator>
	typename ring_buffer<T, Container, Allocator>::container_type&
	ring_buffer<T, Container, Allocator>::get_container()
	{
		return c;
	}
1443 
1444 
	/// Returns the underlying container (read-only access).
	template <typename T, typename Container, typename Allocator>
	const typename ring_buffer<T, Container, Allocator>::container_type&
	ring_buffer<T, Container, Allocator>::get_container() const
	{
		return c;
	}
1451 
1452 
	/// Debug sanity check. Returns true when the container, the begin/end
	/// iterators, and the cached size are all mutually consistent.
	template <typename T, typename Container, typename Allocator>
	inline bool ring_buffer<T, Container, Allocator>::validate() const
	{
		if(!c.validate())  // This requires that the container implement the validate function. That pretty much
			return false;  // means that the container is an EASTL container and not a std STL container.

		if(c.empty())      // c must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element.
			return false;

		if(size() > capacity())
			return false;

		if((validate_iterator(begin()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
			return false;

		if((validate_iterator(end()) & (isf_valid | isf_current)) != (isf_valid | isf_current))
			return false;

		// Verify that the size calculation is consistent.
		size_type n = 0;
		for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
			++n;
		if(n != mSize)
			return false;

		return true;
	}
1480 
1481 
	/// Classifies iterator i: dereferenceable (points at a live element),
	/// valid-but-end, or invalid. O(n): walks the live range looking for i.
	template <typename T, typename Container, typename Allocator>
	inline int ring_buffer<T, Container, Allocator>::validate_iterator(const_iterator i) const
	{
		// To do: Replace this with a more efficient implementation if possible.

		for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
		{
			if(temp == i)
				return (isf_valid | isf_current | isf_can_dereference);
		}

		if(i == end())
			return (isf_valid | isf_current);

		return isf_none;
	}
1498 
1499 
1500 
1501 	///////////////////////////////////////////////////////////////////////
1502 	// global operators
1503 	///////////////////////////////////////////////////////////////////////
1504 
1505 	template <typename T, typename Container, typename Allocator>
1506 	inline bool operator==(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
1507 	{
1508 		return (a.size() == b.size()) && (a.c == b.c);
1509 	}
1510 
1511 
1512 	template <typename T, typename Container, typename Allocator>
1513 	inline bool operator<(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
1514 	{
1515 		const typename ring_buffer<T, Container, Allocator>::size_type sizeA = a.size();
1516 		const typename ring_buffer<T, Container, Allocator>::size_type sizeB = b.size();
1517 
1518 		if(sizeA == sizeB)
1519 			return (a.c < b.c);
1520 		return sizeA < sizeB;
1521 	}
1522 
1523 
	/// Inequality, defined as the negation of operator==.
	template <typename T, typename Container, typename Allocator>
	inline bool operator!=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
	{
		return !(a == b);
	}
1529 
1530 
	/// Greater-than, defined in terms of operator<.
	template <typename T, typename Container, typename Allocator>
	inline bool operator>(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
	{
		return (b < a);
	}
1536 
1537 
	/// Less-than-or-equal, defined in terms of operator<.
	template <typename T, typename Container, typename Allocator>
	inline bool operator<=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
	{
		return !(b < a);
	}
1543 
1544 
	/// Greater-than-or-equal, defined in terms of operator<.
	template <typename T, typename Container, typename Allocator>
	inline bool operator>=(const ring_buffer<T, Container, Allocator>& a, const ring_buffer<T, Container, Allocator>& b)
	{
		return !(a < b);
	}
1550 
1551 
	/// Free-function swap, enabling ADL swap for ring_buffer; forwards to the member swap.
	template <typename T, typename Container, typename Allocator>
	inline void swap(ring_buffer<T, Container, Allocator>& a, ring_buffer<T, Container, Allocator>& b)
	{
		a.swap(b);
	}
1557 
1558 
1559 } // namespace eastl
1560 
1561 
1562 #endif // Header include guard
1563 
1564 
1565 
1566 
1567 
1568 
1569 
1570 
1571 
1572 
1573 
1574 
1575 
1576 
1577 
1578 
1579 
1580 
1581 
1582