//  Copyright (C) 2008-2013 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/integer_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/is_copy_constructible.hpp>

#include <boost/lockfree/detail/allocator_rebind_helper.hpp>
#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost    {
namespace lockfree {
namespace detail   {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;

}

/** The stack class provides a multi-writer/multi-reader stack: pushing and popping are lock-free,
 *  while construction and destruction have to be synchronized. It uses a freelist for memory management;
 *  freed nodes are pushed back onto the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lock-free behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and are addressed
 *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile time.<br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor
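 *
 *  \b Example: <br>
 *    A minimal usage sketch (illustrative only; the element type, the capacity of 128 and the
 *    variable names are assumptions, not part of this header):
 *    \code
 *    boost::lockfree::stack<int, boost::lockfree::capacity<128> > stack;  // fixed-sized, 128 elements
 *
 *    stack.push(42);              // returns false if no free node is available
 *
 *    int value;
 *    if (stack.pop(value)) {
 *        // value now holds the popped element
 *    }
 *    \endcode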
 * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::stack_signature::bind<Options...>::type bound_args;
#endif

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct node
    {
        node(T const & val):
            v(val)
        {}

        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
        handle_t next;
        const T v;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;

    // check compile-time capacity
    BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
                                   mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
                                   mpl::true_
                                  >::type::value));

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

    BOOST_DELETED_FUNCTION(stack(stack const&))
    BOOST_DELETED_FUNCTION(stack& operator= (stack const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free if this is true. Using c++0x-style atomics,
     *          there is no way to provide a completely accurate answer, because one would have to test
     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
     *
     * */
    bool is_lock_free (void) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }
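
    /* An illustrative sketch (not part of the original header): query lock-freedom once during
     * startup and react if the platform cannot provide it. The fallback strategy is hypothetical.
     *
     *   boost::lockfree::stack<int> stack(128);
     *   if (!stack.is_lock_free()) {
     *       // e.g. log a warning or switch to a mutex-protected container
     *   }
     */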

    //! Construct stack
    // @{
    stack(void):
        pool(node_allocator(), capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    template <typename U>
    explicit stack(typename detail::allocator_rebind_helper<node_allocator, U>::type const & alloc):
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    explicit stack(allocator const & alloc):
        pool(alloc, capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }
    // @}

    //! Construct stack, allocate n nodes for the freelist.
    // @{
    explicit stack(size_type n):
        pool(node_allocator(), n)
    {
        BOOST_ASSERT(!has_capacity);
        initialize();
    }

    template <typename U>
    stack(size_type n, typename detail::allocator_rebind_helper<node_allocator, U>::type const & alloc):
        pool(alloc, n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
    // @}

    /** Allocate n nodes for freelist
     *
     *  \pre  only valid if no capacity<> argument given
     *  \note thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<true>(n);
    }

    /** Allocate n nodes for freelist
     *
     *  \pre  only valid if no capacity<> argument given
     *  \note not thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve_unsafe(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<false>(n);
    }
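
    /* An illustrative sketch (not part of the original header): pre-allocate freelist nodes while
     * still single-threaded, so that later pushes do not have to allocate from the OS. The sizes
     * (128, 1024) are arbitrary assumptions.
     *
     *   boost::lockfree::stack<int> stack(128);
     *   stack.reserve_unsafe(1024);   // single-threaded setup phase
     *   // ... start threads that call push()/pop() concurrently ...
     */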

    /** Destroys stack, freeing all nodes from the freelist.
     *
     *  \note not thread-safe
     *
     * */
    ~stack(void)
    {
        T dummy;
        while(unsynchronized_pop(dummy))
        {}
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize(void)
    {
        tos.store(tagged_node_handle(pool.null_handle(), 0));
    }

    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            // retry loop: splice the chain [new_top_node, end_node] on top of the stack with a CAS
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }

    void link_nodes_unsafe(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);

        tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
        end_node->next = pool.get_pointer(old_tos);

        tos.store(new_tos, memory_order_relaxed);
    }

    // allocates and links nodes for the elements of [begin, end); returns the chain as (top, end)
    // and sets 'ret' to the first element for which no node could be allocated
    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }

        node * new_top_node = end_node;
        end_node->next = NULL;

        BOOST_TRY {
            /* link nodes */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } BOOST_CATCH (...) {
            // on exception, release every node that has already been constructed
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = current_node->next;
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            BOOST_RETHROW;
        }
        BOOST_CATCH_END

        ret = it;
        return make_tuple(new_top_node, end_node);
    }
#endif

public:
    /** Pushes object \c v onto the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *                    from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        return do_push<false>(v);
    }

    /** Pushes object \c v onto the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * */
    bool bounded_push(T const & v)
    {
        return do_push<true>(v);
    }
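
    /* An illustrative sketch (not part of the original header) contrasting push() and
     * bounded_push(): push() may allocate a new node from the OS when the freelist is exhausted,
     * while bounded_push() only ever uses nodes already in the freelist. The element values and
     * the freelist size are assumptions.
     *
     *   boost::lockfree::stack<int> stack(64);
     *
     *   stack.push(1);                 // may allocate if all 64 pre-allocated nodes are in use
     *   if (!stack.bounded_push(2)) {
     *       // freelist exhausted; element 2 was not pushed
     *   }
     */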

#ifndef BOOST_DOXYGEN_INVOKED
private:
    template <bool Bounded>
    bool do_push(T const & v)
    {
        node * newnode = pool.template construct<true, Bounded>(v);
        if (newnode == 0)
            return false;

        link_nodes_atomic(newnode, newnode);
        return true;
    }

    template <bool Bounded, typename ConstIterator>
    ConstIterator do_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
        if (new_top_node)
            link_nodes_atomic(new_top_node, end_node);

        return ret;
    }

public:
#endif

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated for.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *                    from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return do_push<false, ConstIterator>(begin, end);
    }
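
    /* An illustrative sketch (not part of the original header): push a whole range and use the
     * returned iterator to see how much of it was accepted. The vector and its contents are
     * assumptions for the example.
     *
     *   std::vector<int> values(4, 1);
     *   std::vector<int>::const_iterator first_unpushed =
     *       stack.push(values.begin(), values.end());
     *
     *   if (first_unpushed != values.end()) {
     *       // elements in [first_unpushed, values.end()) were not pushed
     *   }
     */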

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated for.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        return do_push<true, ConstIterator>(begin, end);
    }


    /** Pushes object \c v onto the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & v)
    {
        node * newnode = pool.template construct<false, false>(v);
        if (newnode == 0)
            return false;

        link_nodes_unsafe(newnode, newnode);
        return true;
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated for.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
        if (new_top_node)
            link_nodes_unsafe(new_top_node, end_node);

        return ret;
    }


    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        detail::consume_via_copy<U> consumer(ret);

        return consume_one(consumer);
    }
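
    /* An illustrative sketch (not part of the original header): pop into a convertible target
     * type. The element type and the target type are assumptions for the example.
     *
     *   boost::lockfree::stack<int> stack(16);
     *   stack.push(7);
     *
     *   long value;                    // int is convertible to long
     *   while (stack.pop(value)) {
     *       // process value
     *   }
     */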


    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        node * old_tos_pointer = pool.get_pointer(old_tos);

        if (!pool.get_pointer(old_tos))
            return false;

        node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
        tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());

        tos.store(new_tos, memory_order_relaxed);
        detail::copy_payload(old_tos_pointer->v, ret);
        pool.template destruct<false>(old_tos);
        return true;
    }

    /** consumes one element via a functor
     *
     *  pops one element from the stack and applies the functor on this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            // CAS retry loop: unlink the top node; on success apply the functor and release the node
            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }

    /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
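
    /* An illustrative sketch (not part of the original header): consume an element with a lambda
     * (or any callable taking a T). The element type and the lambda body are assumptions.
     *
     *   boost::lockfree::stack<int> stack(16);
     *   stack.push(3);
     *
     *   stack.consume_one([](int value) {
     *       // process value
     *   });
     */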

    /** consumes all elements via a functor
     *
     * sequentially pops all elements from the stack and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        // detach the whole chain from the stack with a single successful CAS
        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        // the detached chain is now owned by this thread; walk it, apply the functor and release each node
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
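
    /* An illustrative sketch (not part of the original header): consume_all() pops elements one by
     * one, so other threads may interleave pushes between pops; consume_all_atomic() first detaches
     * the entire chain in one step and then processes it. The counter name is an assumption.
     *
     *   std::size_t consumed = stack.consume_all_atomic([](int value) {
     *       // process value; elements pushed after the detach are not seen here
     *   });
     */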

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor on each object in reversed order
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        // detach the whole chain from the stack with a single successful CAS
        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        // first pass: reverse the detached chain so the elements are visited oldest-first
        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node    = pool.get_pointer(node_pointer->next);

            node_pointer->next  = pool.get_handle(last_node_pointer);
            last_node_pointer   = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        // second pass: apply the functor to each node of the reversed chain and release it
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node    = pool.get_pointer(node_pointer->next);

            node_pointer->next  = pool.get_handle(last_node_pointer);
            last_node_pointer   = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        return pool.get_pointer(tos.load()) == NULL;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic<tagged_node_handle> tos;

    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */