/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 
25 #ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP
26 #define SHARE_UTILITIES_LOCKFREESTACK_HPP
27 
28 #include "runtime/atomic.hpp"
29 #include "utilities/debug.hpp"
30 #include "utilities/globalDefinitions.hpp"
31 
// The LockFreeStack class template provides a lock-free LIFO. The objects
// in the sequence are intrusively linked via a member in the objects.  As
// a result, there is no allocation involved in adding objects to the stack
// or removing them from the stack.
//
// To be used in a LockFreeStack of objects of type T, an object of
// type T must have a list entry member of type T* volatile, with a
// non-member accessor function returning a pointer to that member.  A
// LockFreeStack is associated with the class of its elements and an
// entry member from that class.
//
// An object can be in multiple stacks at the same time, so long as
// each stack uses a different entry member. That is, the class of the
// object must have multiple LockFreeStack entry members, one for each
// stack in which the object may simultaneously be an element.
//
// LockFreeStacks support polymorphic elements.  Because the objects
// in a stack are externally managed, rather than being embedded
// values in the stack, the actual type of such objects may be more
// specific than the stack's element type.
//
// \tparam T is the class of the elements in the stack.
//
// \tparam next_ptr is a function pointer.  Applying this function to
// an object of type T must return a pointer to the list entry member
// of the object associated with the LockFreeStack type.
58 template<typename T, T* volatile* (*next_ptr)(T&)>
59 class LockFreeStack {
60   T* volatile _top;
61 
prepend_impl(T * first,T * last)62   void prepend_impl(T* first, T* last) {
63     T* cur = top();
64     T* old;
65     do {
66       old = cur;
67       set_next(*last, cur);
68       cur = Atomic::cmpxchg(first, &_top, cur);
69     } while (old != cur);
70   }
71 
72   NONCOPYABLE(LockFreeStack);
73 
74 public:
LockFreeStack()75   LockFreeStack() : _top(NULL) {}
~LockFreeStack()76   ~LockFreeStack() { assert(empty(), "stack not empty"); }
77 
78   // Atomically removes the top object from this stack and returns a
79   // pointer to that object, or NULL if this stack is empty. Acts as a
80   // full memory barrier. Subject to ABA behavior; callers must ensure
81   // usage is safe.
pop()82   T* pop() {
83     T* result = top();
84     T* old;
85     do {
86       old = result;
87       T* new_top = NULL;
88       if (result != NULL) {
89         new_top = next(*result);
90       }
91       // CAS even on empty pop, for consistent membar bahavior.
92       result = Atomic::cmpxchg(new_top, &_top, result);
93     } while (result != old);
94     if (result != NULL) {
95       set_next(*result, NULL);
96     }
97     return result;
98   }
99 
100   // Atomically exchange the list of elements with NULL, returning the old
101   // list of elements.  Acts as a full memory barrier.
102   // postcondition: empty()
pop_all()103   T* pop_all() {
104     return Atomic::xchg((T*)NULL, &_top);
105   }
106 
107   // Atomically adds value to the top of this stack.  Acts as a full
108   // memory barrier.
push(T & value)109   void push(T& value) {
110     assert(next(value) == NULL, "precondition");
111     prepend_impl(&value, &value);
112   }
113 
114   // Atomically adds the list of objects (designated by first and
115   // last) before the objects already in this stack, in the same order
116   // as in the list. Acts as a full memory barrier.
117   // precondition: next(last) == NULL.
118   // postcondition: top() == &first, next(last) == old top().
prepend(T & first,T & last)119   void prepend(T& first, T& last) {
120     assert(next(last) == NULL, "precondition");
121 #ifdef ASSERT
122     for (T* p = &first; p != &last; p = next(*p)) {
123       assert(p != NULL, "invalid prepend list");
124     }
125 #endif
126     prepend_impl(&first, &last);
127   }
128 
129   // Atomically adds the list of objects headed by first before the
130   // objects already in this stack, in the same order as in the list.
131   // Acts as a full memory barrier.
132   // postcondition: top() == &first.
prepend(T & first)133   void prepend(T& first) {
134     T* last = &first;
135     while (true) {
136       T* step_to = next(*last);
137       if (step_to == NULL) break;
138       last = step_to;
139     }
140     prepend_impl(&first, last);
141   }
142 
143   // Return true if the stack is empty.
empty() const144   bool empty() const { return top() == NULL; }
145 
146   // Return the most recently pushed element, or NULL if the stack is empty.
147   // The returned element is not removed from the stack.
top() const148   T* top() const { return Atomic::load(&_top); }
149 
150   // Return the number of objects in the stack.  There must be no concurrent
151   // pops while the length is being determined.
length() const152   size_t length() const {
153     size_t result = 0;
154     for (const T* current = top(); current != NULL; current = next(*current)) {
155       ++result;
156     }
157     return result;
158   }
159 
160   // Return the entry following value in the list used by the
161   // specialized LockFreeStack class.
next(const T & value)162   static T* next(const T& value) {
163     return Atomic::load(next_ptr(const_cast<T&>(value)));
164   }
165 
166   // Set the entry following value to new_next in the list used by the
167   // specialized LockFreeStack class.  Not thread-safe; in particular,
168   // if value is in an instance of this specialization of LockFreeStack,
169   // there must be no concurrent push or pop operations on that stack.
set_next(T & value,T * new_next)170   static void set_next(T& value, T* new_next) {
171     Atomic::store(new_next, next_ptr(value));
172   }
173 };
174 
175 #endif // SHARE_UTILITIES_LOCKFREESTACK_HPP
176