/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP

#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/concurrentHashTable.inline.hpp"

// This inline file contains BulkDeleteTask and GrowTask, which are both bucket
// operations and therefore serialized with each other.

// Base class for pause and/or parallel bulk operations.
template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
 protected:
  ConcurrentHashTable<CONFIG, F>* _cht;

  // Default size of _task_size_log2.
  static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;

  // The table is split into ranges; every claim increment is one range.
  volatile size_t _next_to_claim;
  size_t _task_size_log2; // Number of buckets per task (log2).
  size_t _stop_task;      // Number of tasks.
  size_t _size_log2;      // Table size (log2).
  bool   _is_mt;

  BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
    : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
      _stop_task(0), _size_log2(0), _is_mt(is_mt) {}

  // Returns true if a range, start -> (stop-1), was successfully claimed.
  bool claim(size_t* start, size_t* stop) {
    size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u);
    if (claimed >= _stop_task) {
      return false;
    }
    // Each task covers 2^_task_size_log2 consecutive buckets.
    *start = claimed * (((size_t)1) << _task_size_log2);
    *stop  = ((*start) + (((size_t)1) << _task_size_log2));
    return true;
  }

  // Calculate starting values.
  void setup(Thread* thread) {
    thread_owns_resize_lock(thread);
    _size_log2 = _cht->_table->_log2_size;
    _task_size_log2 = MIN2(_task_size_log2, _size_log2);
    size_t tmp = _size_log2 > _task_size_log2 ?
                 _size_log2 - _task_size_log2 : 0;
    _stop_task = (((size_t)1) << tmp);
  }

  // Returns false if all ranges are claimed.
  bool have_more_work() {
    return Atomic::load_acquire(&_next_to_claim) < _stop_task;
  }

  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }

  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }

  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }

 public:
  // Pauses for safepoint.
  void pause(Thread* thread) {
    // This leaves internal state locked.
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->_resize_lock->unlock();
    this->thread_owns_only_state_lock(thread);
  }

  // Continues after safepoint.
  void cont(Thread* thread) {
    this->thread_owns_only_state_lock(thread);
    // If someone slips in here directly after the safepoint.
    while (!BucketsOperation::_cht->_resize_lock->try_lock())
      { /* for ever */ };
    this->thread_owns_resize_lock(thread);
  }
};

// For doing pausable/parallel bulk delete.
template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask :
  public BucketsOperation
{
 public:
  BulkDeleteTask(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
    : BucketsOperation(cht, is_mt) {
  }

  // prepare() must be called before starting.
  bool prepare(Thread* thread) {
    bool lock = BucketsOperation::_cht->try_resize_lock(thread);
    if (!lock) {
      return false;
    }
    this->setup(thread);
    return true;
  }

  // Does one range, destroying all entries matching EVALUATE_FUNC;
  // DELETE_FUNC is called before destruction. Returns true if there is
  // more work.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
                                                      eval_f, del_f,
                                                      BucketsOperation::_is_mt);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Must be called after all ranges are done.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
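// A minimal driving sketch for a bulk delete (illustrative only, not part of
// this file; "MyConfig", "my_cht", "MyValue", "is_dead" and "on_delete" are
// hypothetical names). The protocol is: prepare(), then do_task() until it
// returns false, pausing with pause()/cont() around any safepoint taken in
// between, and finally done().
//
//   ConcurrentHashTable<MyConfig, mtInternal>::BulkDeleteTask task(my_cht);
//   if (task.prepare(thread)) {               // takes the resize lock
//     auto is_dead   = [](MyValue* v) { return !v->is_live(); };
//     auto on_delete = [](MyValue* v) { /* called before destruction */ };
//     while (task.do_task(thread, is_dead, on_delete)) {
//       task.pause(thread);                   // drop lock across a safepoint
//       // ... safepoint may happen here ...
//       task.cont(thread);                    // re-take lock and continue
//     }
//     task.done(thread);                      // releases the resize lock
//   }
//
// GrowTask (below) follows the same prepare()/do_task()/done() protocol,
// with internal_grow_prolog()/internal_grow_epilog() handling the locking.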
template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<CONFIG, F>::GrowTask :
  public BucketsOperation
{
 public:
  GrowTask(ConcurrentHashTable<CONFIG, F>* cht) : BucketsOperation(cht) {
  }

  // prepare() must be called before starting.
  bool prepare(Thread* thread) {
    if (!BucketsOperation::_cht->internal_grow_prolog(
          thread, BucketsOperation::_cht->_log2_size_limit)) {
      return false;
    }
    this->setup(thread);
    return true;
  }

  // Re-sizes a portion of the table. Returns true if there is more work.
  bool do_task(Thread* thread) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->internal_grow_range(thread, start, stop);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Must be called after do_task returns false.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->internal_grow_epilog(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP