/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef coarse_grained_raii_lru_cache_H
#define coarse_grained_raii_lru_cache_H

#include <map>
#include <list>
#include <utility>
#include <algorithm>

#include "tbb/spin_mutex.h"
#include "tbb/tbb_stddef.h"
template <typename key_type, typename value_type, typename value_functor_type = value_type (*)(key_type) >
class coarse_grained_raii_lru_cache : tbb::internal::no_assign {
    typedef value_functor_type value_function_type;

    typedef std::size_t ref_counter_type;
    struct map_value_type;
    typedef std::map<key_type, map_value_type> map_storage_type;
    typedef std::list<typename map_storage_type::iterator> lru_list_type;
    struct map_value_type {
        value_type my_value;
        ref_counter_type my_ref_counter;
        // Position in the LRU history; equals my_lru_list.end() while the entry is in use.
        typename lru_list_type::iterator my_lru_list_iterator;
        // Set (with release semantics) once my_value has been computed.
        bool my_is_ready;

        map_value_type(value_type const& a_value, ref_counter_type a_ref_counter,
                       typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready)
            : my_value(a_value), my_ref_counter(a_ref_counter)
            , my_lru_list_iterator(a_lru_list_iterator), my_is_ready(a_is_ready)
        {}
    };

    class handle_object;
public:
    typedef handle_object handle;

    coarse_grained_raii_lru_cache(value_function_type f, std::size_t number_of_lru_history_items)
        : my_value_function(f), my_number_of_lru_history_items(number_of_lru_history_items)
    {}
    handle_object operator[](key_type k) {
        tbb::spin_mutex::scoped_lock lock(my_mutex);
        bool is_new_value_needed = false;
        typename map_storage_type::iterator it = my_map_storage.find(k);
        if (it == my_map_storage.end()) {
            // Insert a not-yet-ready placeholder; the value is computed below, outside the lock.
            it = my_map_storage.insert(it, std::make_pair(k, map_value_type(value_type(), 0, my_lru_list.end(), false)));
            is_new_value_needed = true;
        } else {
            // The entry already exists; if it is currently unused, remove it from the
            // LRU history so it cannot be evicted while this handle pins it.
            typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator;
            if (list_it != my_lru_list.end()) {
                my_lru_list.erase(list_it);
                it->second.my_lru_list_iterator = my_lru_list.end();
            }
        }
        typename map_storage_type::reference value_ref = *it;
        // Increase the reference count while still holding the lock.
        ++(value_ref.second.my_ref_counter);
        if (is_new_value_needed) {
            // Compute the value outside the lock; the release store publishes it to
            // concurrent readers of the same key spinning on my_is_ready below.
            lock.release();
            value_ref.second.my_value = my_value_function(k);
            __TBB_store_with_release(value_ref.second.my_is_ready, true);
        } else {
            if (!value_ref.second.my_is_ready) {
                lock.release();
                tbb::internal::spin_wait_while_eq(value_ref.second.my_is_ready, false);
            }
        }
        return handle_object(*this, value_ref);
    }
private:
    void signal_end_of_usage(typename map_storage_type::reference value_ref) {
        tbb::spin_mutex::scoped_lock lock(my_mutex);
        typename map_storage_type::iterator it = my_map_storage.find(value_ref.first);
        __TBB_ASSERT(it != my_map_storage.end(), "cache should not return past-end iterators to outer world");
        __TBB_ASSERT(&(*it) == &value_ref, "dangling reference has been returned to outside world? data race?");
        __TBB_ASSERT(my_lru_list.end() == std::find(my_lru_list.begin(), my_lru_list.end(), it),
                "object in use should not be in list of unused objects");
        // Decrease the reference count; if this was the last handle, retire the entry
        // to the LRU history, first evicting the oldest entries to make room.
        if (!--(it->second.my_ref_counter)) {
            if (my_lru_list.size() >= my_number_of_lru_history_items) {
                size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items;
                for (size_t i = 0; i < number_of_elements_to_evict; ++i) {
                    typename map_storage_type::iterator it_to_evict = my_lru_list.back();
                    my_lru_list.pop_back();
                    my_map_storage.erase(it_to_evict);
                }
            }
            my_lru_list.push_front(it);
            it->second.my_lru_list_iterator = my_lru_list.begin();
        }
    }
private:
    value_function_type my_value_function;
    std::size_t const my_number_of_lru_history_items;
    map_storage_type my_map_storage;
    lru_list_type my_lru_list;
    tbb::spin_mutex my_mutex;
private:
    // Helper for C++03-style move emulation (as in std::auto_ptr_ref): transfers
    // ownership of a cache entry from one handle_object to another without copying.
    struct handle_move_t : tbb::internal::no_assign {
        coarse_grained_raii_lru_cache& my_cache_ref;
        typename map_storage_type::reference my_value_ref;
        handle_move_t(coarse_grained_raii_lru_cache& cache_ref, typename map_storage_type::reference value_ref)
            : my_cache_ref(cache_ref), my_value_ref(value_ref) {}
    };
    class handle_object {
        coarse_grained_raii_lru_cache* my_cache_pointer;
        typename map_storage_type::reference my_value_ref;
    public:
        handle_object(coarse_grained_raii_lru_cache& cache_ref, typename map_storage_type::reference value_ref)
            : my_cache_pointer(&cache_ref), my_value_ref(value_ref) {}
        handle_object(handle_move_t m)
            : my_cache_pointer(&m.my_cache_ref), my_value_ref(m.my_value_ref) {}
        operator handle_move_t() { return move(*this); }
        value_type& value() { return my_value_ref.second.my_value; }
        ~handle_object() {
            // A moved-from handle has a NULL cache pointer and releases nothing.
            if (my_cache_pointer) {
                my_cache_pointer->signal_end_of_usage(my_value_ref);
            }
        }
    private:
        friend handle_move_t move(handle_object& h) {
            return handle_object::move(h);
        }
        static handle_move_t move(handle_object& h) {
            __TBB_ASSERT(h.my_cache_pointer, "move from the same object twice?");
            coarse_grained_raii_lru_cache* cache_pointer = NULL;
            std::swap(cache_pointer, h.my_cache_pointer);
            return handle_move_t(*cache_pointer, h.my_value_ref);
        }
    private:
        // Copying is prohibited; handles may only be transferred via handle_move_t.
        void operator=(handle_object&);
        handle_object(handle_object&);
    };
};
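
/* Because handle_object is non-copyable, ownership of a pinned entry is
   transferred through the handle_move_t conversion above: move() detaches the
   source handle, and the handle_move_t converting constructor re-attaches it.
   A hypothetical sketch (cache_type as in the example at the top of this file):

       typedef coarse_grained_raii_lru_cache<int, int> cache_type;

       cache_type::handle h1 = cache[1];   // binds via the handle_move_t conversion
       cache_type::handle h2(move(h1));    // friend move() found via ADL;
                                           // h1 is detached and releases nothing,
                                           // h2 now keeps entry 1 pinned
*/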
#endif //coarse_grained_raii_lru_cache_H