/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_lru_cache_H
#define __TBB_concurrent_lru_cache_H

#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
    #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
#endif

#include "detail/_assert.h"
#include "detail/_aggregator.h"

#include <map>       // for std::map
#include <list>      // for std::list
#include <utility>   // for std::make_pair
#include <algorithm> // for std::find
#include <atomic>    // for std::atomic<bool>

namespace tbb {

namespace detail {
namespace d1 {

//-----------------------------------------------------------------------------
// Concurrent LRU cache
//-----------------------------------------------------------------------------

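// A minimal usage sketch (illustrative only: `load` and `use` are hypothetical
// user-supplied functions, and the include path assumes the standard oneTBB
// layout). The cache is constructed from a key-to-value functor plus a history
// capacity; operator[] returns a handle that pins the record while it lives:
//
//     #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
//     #include "tbb/concurrent_lru_cache.h"
//
//     std::string load(int key);                     // user-supplied loader
//     tbb::concurrent_lru_cache<int, std::string> cache(&load, /*capacity*/ 1024);
//
//     {
//         auto h = cache[42];    // first caller computes load(42), others wait
//         use(h.value());        // value stays valid while the handle lives
//     }                          // last handle released: record joins LRU history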
template<typename KeyT, typename ValT, typename KeyToValFunctorT = ValT (*) (KeyT)>
class concurrent_lru_cache : no_assign {
// encapsulated helper classes
private:
    struct handle_object;
    struct storage_map_value_type;

    struct aggregator_operation;
    struct retrieve_aggregator_operation;
    struct signal_end_of_usage_aggregator_operation;

// typedefs
public:
    using key_type = KeyT;
    using value_type = ValT;
    using pointer = ValT*;
    using reference = ValT&;
    using const_pointer = const ValT*;
    using const_reference = const ValT&;

    using value_function_type = KeyToValFunctorT;
    using handle = handle_object;
private:
    using lru_cache_type = concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>;

    using storage_map_type = std::map<key_type, storage_map_value_type>;
    using storage_map_iterator_type = typename storage_map_type::iterator;
    using storage_map_pointer_type = typename storage_map_type::pointer;
    using storage_map_reference_type = typename storage_map_type::reference;

    using history_list_type = std::list<storage_map_iterator_type>;
    using history_list_iterator_type = typename history_list_type::iterator;

    using aggregator_operation_type = aggregator_operation;
    using aggregator_function_type = aggregating_functor<lru_cache_type, aggregator_operation_type>;
    using aggregator_type = aggregator<aggregator_function_type, aggregator_operation_type>;

    friend class aggregating_functor<lru_cache_type, aggregator_operation_type>;

// fields
private:
    value_function_type my_value_function;
    aggregator_type my_aggregator;

    storage_map_type my_storage_map;            // storage map for used objects
    history_list_type my_history_list;          // history list for unused objects
    const std::size_t my_history_list_capacity; // history list's allowed capacity

// interface
public:

    concurrent_lru_cache(value_function_type value_function, std::size_t cache_capacity)
        : my_value_function(value_function), my_history_list_capacity(cache_capacity) {
        my_aggregator.initialize_handler(aggregator_function_type(this));
    }

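    // Returns a handle that keeps the record for `key` alive. Exactly one
    // caller observes is_new_value_needed() == true and computes the value
    // outside the aggregator's serial section; concurrent callers for the
    // same key receive the same record and spin on my_is_ready until the
    // winner publishes the value with a release store.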
    handle operator[](key_type key) {
        retrieve_aggregator_operation op(key);
        my_aggregator.execute(&op);

        if (op.is_new_value_needed()) {
            op.result().second.my_value = my_value_function(key);
            op.result().second.my_is_ready.store(true, std::memory_order_release);
        } else {
            spin_wait_while_eq(op.result().second.my_is_ready, false);
        }

        return handle(*this, op.result());
    }

private:

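    // Aggregator callback: a single thread at a time drains the batch of
    // queued operations, so my_storage_map and my_history_list are mutated
    // without further locking. Storing a nonzero status last releases the
    // thread that submitted the corresponding operation.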
    void handle_operations(aggregator_operation* op_list) {
        while (op_list) {
            op_list->cast_and_handle(*this);
            aggregator_operation* prev_op = op_list;
            op_list = op_list->next;

            (prev_op->status).store(1, std::memory_order_release);
        }
    }

    void signal_end_of_usage(storage_map_reference_type map_record_ref) {
        signal_end_of_usage_aggregator_operation op(map_record_ref);
        my_aggregator.execute(&op);
    }

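    // Serial part of releasing a handle: drops one reference; when the count
    // reaches zero, the record is linked to the front of the LRU history,
    // evicting records from the back (least recently used) if the history
    // exceeds its capacity.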
    void signal_end_of_usage_serial(storage_map_reference_type map_record_ref) {
        storage_map_iterator_type map_it = my_storage_map.find(map_record_ref.first);

        __TBB_ASSERT(map_it != my_storage_map.end(),
            "cache should not return past-the-end iterators to the outside world");
        __TBB_ASSERT(&(*map_it) == &map_record_ref,
            "dangling reference has been returned to the outside world: data race?");
        __TBB_ASSERT(std::find(my_history_list.begin(), my_history_list.end(), map_it) == my_history_list.end(),
            "an object in use should not be in the list of unused objects");

        // if it was the last reference, put the record into the LRU history
        if (! --(map_it->second.my_ref_counter)) {
            // if the LRU history is full, evict the oldest items to make space
            if (my_history_list.size() >= my_history_list_capacity) {
                std::size_t number_of_elements_to_evict = 1 + my_history_list.size() - my_history_list_capacity;

                for (std::size_t i = 0; i < number_of_elements_to_evict; ++i) {
                    storage_map_iterator_type map_it_to_evict = my_history_list.back();

                    __TBB_ASSERT(map_it_to_evict->second.my_ref_counter == 0,
                        "an item to be evicted should not have live references");

                    // TODO: can we use forward_list instead of list? pop_front / insert_after last
                    my_history_list.pop_back();
                    my_storage_map.erase(map_it_to_evict);
                }
            }

            // TODO: can we use forward_list instead of list? pop_front / insert_after last
            my_history_list.push_front(map_it);
            map_it->second.my_history_list_iterator = my_history_list.begin();
        }
    }

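    // Serial part of operator[]: on a miss, emplaces a not-yet-ready
    // placeholder record and reports that a new value must be computed; on a
    // hit, removes the record from the LRU history (if present) so it cannot
    // be evicted while referenced.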
    storage_map_reference_type retrieve_serial(key_type key, bool& is_new_value_needed) {
        storage_map_iterator_type map_it = my_storage_map.find(key);

        if (map_it == my_storage_map.end()) {
            map_it = my_storage_map.emplace_hint(
                map_it, std::piecewise_construct, std::make_tuple(key), std::make_tuple(value_type(), 0, my_history_list.end(), false));
            is_new_value_needed = true;
        } else {
            history_list_iterator_type list_it = map_it->second.my_history_list_iterator;
            if (list_it != my_history_list.end()) {
                __TBB_ASSERT(map_it->second.my_ref_counter == 0,
                    "an item in the LRU history should not have live references");

                // The item is about to be used, so it is no longer a candidate
                // for eviction; remove it from the LRU history.
                my_history_list.erase(list_it);
                map_it->second.my_history_list_iterator = my_history_list.end();
            }
        }

        ++(map_it->second.my_ref_counter);
        return *map_it;
    }
};


//-----------------------------------------------------------------------------
// Value type for storage map in concurrent LRU cache
//-----------------------------------------------------------------------------

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::storage_map_value_type {
// typedefs
public:
    using ref_counter_type = std::size_t;

// fields
public:
    value_type my_value;
    ref_counter_type my_ref_counter;
    history_list_iterator_type my_history_list_iterator;
    std::atomic<bool> my_is_ready;

// interface
public:
    storage_map_value_type(
        value_type const& value, ref_counter_type ref_counter,
        history_list_iterator_type history_list_iterator, bool is_ready)
        : my_value(value), my_ref_counter(ref_counter),
          my_history_list_iterator(history_list_iterator), my_is_ready(is_ready) {}
};

//-----------------------------------------------------------------------------
// Handle object for operator[] in concurrent LRU cache
//-----------------------------------------------------------------------------

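// A hedged sketch of handle semantics, continuing the usage example above
// (`use` remains hypothetical): handles are move-only because each one owns a
// single reference count on its map record, released exactly once on
// destruction.
//
//     auto h1 = cache[1];
//     auto h2 = std::move(h1);    // h1 is now empty: bool(h1) == false
//     if (h2) use(h2.value());    // h2 releases the reference when destroyed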
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::handle_object {
// fields
private:
    lru_cache_type* my_lru_cache_ptr;
    storage_map_pointer_type my_map_record_ptr;

// interface
public:
    handle_object()
        : my_lru_cache_ptr(nullptr), my_map_record_ptr(nullptr) {}
    handle_object(lru_cache_type& lru_cache_ref, storage_map_reference_type map_record_ref)
        : my_lru_cache_ptr(&lru_cache_ref), my_map_record_ptr(&map_record_ref) {}

    handle_object(handle_object&) = delete;
    void operator=(handle_object&) = delete;

    handle_object(handle_object&& other)
        : my_lru_cache_ptr(other.my_lru_cache_ptr), my_map_record_ptr(other.my_map_record_ptr) {

        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moved-from object?");

        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;
    }

    handle_object& operator=(handle_object&& other) {
        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moved-from object?");

        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);

        my_lru_cache_ptr = other.my_lru_cache_ptr;
        my_map_record_ptr = other.my_map_record_ptr;
        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;

        return *this;
    }

    ~handle_object() {
        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);
    }

    operator bool() const {
        return (my_lru_cache_ptr && my_map_record_ptr);
    }

    value_type& value() {
        __TBB_ASSERT(my_lru_cache_ptr, "get value from an already moved-from object?");
        __TBB_ASSERT(my_map_record_ptr, "get value from an invalid or already moved-from object?");

        return my_map_record_ptr->second.my_value;
    }
};

//-----------------------------------------------------------------------------
// Aggregator operation for aggregator type in concurrent LRU cache
//-----------------------------------------------------------------------------

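// Tagged base class for the two operation kinds submitted to the aggregator;
// cast_and_handle() dispatches on the stored tag rather than through virtual
// functions (see the TODO inside for the alternatives considered).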
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::aggregator_operation
    : aggregated_operation<aggregator_operation> {
// encapsulated helper classes
public:
    enum class op_type { retrieve, signal_end_of_usage };

// fields
private:
    op_type my_op;

// interface
public:
    aggregator_operation(op_type op) : my_op(op) {}

    // TODO: aggregator_operation can be implemented
    //   - as a statically typed variant type or CRTP? (static, dependent on the use case)
    //   - or use a pointer to function and apply_visitor (dynamic)
    //   - or use virtual functions (dynamic)
    void cast_and_handle(lru_cache_type& lru_cache_ref) {
        if (my_op == op_type::retrieve)
            static_cast<retrieve_aggregator_operation*>(this)->handle(lru_cache_ref);
        else
            static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(lru_cache_ref);
    }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::retrieve_aggregator_operation
    : aggregator_operation, private no_assign {
public:
    key_type my_key;
    storage_map_pointer_type my_map_record_ptr;
    bool my_is_new_value_needed;

public:
    retrieve_aggregator_operation(key_type key)
        : aggregator_operation(aggregator_operation::op_type::retrieve),
          my_key(key), my_is_new_value_needed(false) {}

    void handle(lru_cache_type& lru_cache_ref) {
        my_map_record_ptr = &lru_cache_ref.retrieve_serial(my_key, my_is_new_value_needed);
    }

    storage_map_reference_type result() { return *my_map_record_ptr; }

    bool is_new_value_needed() { return my_is_new_value_needed; }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::signal_end_of_usage_aggregator_operation
    : aggregator_operation, private no_assign {

private:
    storage_map_reference_type my_map_record_ref;

public:
    signal_end_of_usage_aggregator_operation(storage_map_reference_type map_record_ref)
        : aggregator_operation(aggregator_operation::op_type::signal_end_of_usage),
          my_map_record_ref(map_record_ref) {}

    void handle(lru_cache_type& lru_cache_ref) {
        lru_cache_ref.signal_end_of_usage_serial(my_map_record_ref);
    }
};

// TODO: if we have guarantees that KeyToValFunctorT always has
//       ValT as a return type and KeyT as an argument type,
//       we can deduce the template parameters of concurrent_lru_cache
//       by pattern matching on KeyToValFunctorT

} // namespace d1
} // namespace detail

inline namespace v1 {

using detail::d1::concurrent_lru_cache;

} // inline namespace v1
} // namespace tbb

#endif // __TBB_concurrent_lru_cache_H