1 /*
2 This file is part of darktable,
3 Copyright (C) 2011-2021 darktable developers.
4
5 darktable is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation, either version 3 of the License, or
8 (at your option) any later version.
9
10 darktable is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with darktable. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "config.h"
20
21 #include "common/cache.h"
22 #include "common/darktable.h"
23 #include "common/dtpthread.h"
24
25 #include <assert.h>
26 #include <inttypes.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 // this implements a concurrent LRU cache
31
// initialize an empty cache: no entries, no callbacks, the given
// per-entry payload size and total cost budget.
void dt_cache_init(
    dt_cache_t *cache,
    size_t entry_size,
    size_t cost_quota)
{
  cache->entry_size = entry_size;
  cache->cost_quota = cost_quota;
  cache->cost = 0;
  cache->lru = NULL;
  cache->hashtable = g_hash_table_new(NULL, NULL);
  // no allocate/cleanup callbacks by default; dt_cache_get will then
  // malloc/free entry_size bytes itself.
  cache->allocate = NULL;
  cache->allocate_data = NULL;
  cache->cleanup = NULL;
  cache->cleanup_data = NULL;
  dt_pthread_mutex_init(&cache->lock, NULL);
}
48
// tear down the cache: release every entry still on the LRU list
// (via the cleanup callback if set, else by freeing the payload),
// then drop the list, hash table and mutex.
void dt_cache_cleanup(dt_cache_t *cache)
{
  g_hash_table_destroy(cache->hashtable);

  GList *l = cache->lru;
  while(l)
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;

    if(cache->cleanup)
    {
      // unpoison before handing the payload back to the owner's cleanup:
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);
      cache->cleanup(cache->cleanup_data, entry);
    }
    else
    {
      dt_free_align(entry->data);
    }

    dt_pthread_rwlock_destroy(&entry->lock);
    g_slice_free1(sizeof(*entry), entry);
    l = g_list_next(l);
  }

  g_list_free(cache->lru);
  dt_pthread_mutex_destroy(&cache->lock);
}
72
// thread-safe test whether a key is currently cached.
// note: the answer may be stale by the time the caller acts on it.
int32_t dt_cache_contains(dt_cache_t *cache, const uint32_t key)
{
  dt_pthread_mutex_lock(&cache->lock);
  const gboolean found = g_hash_table_contains(cache->hashtable, GINT_TO_POINTER(key));
  dt_pthread_mutex_unlock(&cache->lock);
  return found ? 1 : 0;
}
80
// call process() for every cached entry while holding the cache mutex.
// stops at the first non-zero return from process() and passes that
// value on; returns 0 if all entries were visited.
// WARNING: process() runs with the cache lock held — it must not
// call back into this cache.
int dt_cache_for_all(
    dt_cache_t *cache,
    int (*process)(const uint32_t key, const void *data, void *user_data),
    void *user_data)
{
  GHashTableIter iter;
  gpointer key, value;
  int err = 0;

  dt_pthread_mutex_lock(&cache->lock);
  g_hash_table_iter_init(&iter, cache->hashtable);
  // stop iterating as soon as the callback reports an error:
  while(!err && g_hash_table_iter_next(&iter, &key, &value))
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    err = process(GPOINTER_TO_INT(key), entry->data, user_data);
  }
  dt_pthread_mutex_unlock(&cache->lock);

  return err;
}
104
// return read locked bucket, or NULL if it's not already there.
// never attempt to allocate a new slot.
// mode 'w' requests a write lock on the entry, anything else a read lock.
// on success the entry is returned locked; the caller must release it
// with dt_cache_release().
dt_cache_entry_t *dt_cache_testget(dt_cache_t *cache, const uint32_t key, char mode)
{
  gpointer orig_key, value;
  gboolean res;
  // timestamp for the slow-lock diagnostics printed below
  double start = dt_get_wtime();
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  if(res)
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    // lock the cache entry (try-lock only: we must not block while
    // holding the cache mutex)
    const int result
        = (mode == 'w') ? dt_pthread_rwlock_trywrlock(&entry->lock) : dt_pthread_rwlock_tryrdlock(&entry->lock);
    if(result)
    { // need to give up mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
      return 0;
    }
    // bubble up in lru list: move the entry's link node to the tail
    // (the tail is the most recently used end)
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);
    // report unusually long waits (> 0.1 s) for lock contention debugging
    double end = dt_get_wtime();
    if(end - start > 0.1)
      fprintf(stderr, "try+ wait time %.06fs mode %c \n", end - start, mode);

    if(mode == 'w')
    {
      // poison the payload so ASAN flags any access before the writer
      // explicitly unpoisons what it is about to fill in
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

    // WARNING: do *NOT* unpoison here. it must be done by the caller!

    return entry;
  }
  // key not present: unlock and report failure
  dt_pthread_mutex_unlock(&cache->lock);
  double end = dt_get_wtime();
  if(end - start > 0.1)
    fprintf(stderr, "try- wait time %.06fs\n", end - start);
  return 0;
}
151
// if found, the data void* is returned. if not, it is set to be
// the given *data and a new hash table entry is created, which can be
// found using the given key later on.
// unlike dt_cache_testget() this blocks (spinning with short sleeps)
// until the entry lock is acquired, and allocates a new entry on miss.
// file/line identify the caller for lock-debugging builds.
dt_cache_entry_t *dt_cache_get_with_caller(dt_cache_t *cache, const uint32_t key, char mode, const char *file, int line)
{
  gpointer orig_key, value;
  gboolean res;
  int result;
  double start = dt_get_wtime();
restart:
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  if(res)
  { // yay, found. read lock and pass on.
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    if(mode == 'w') result = dt_pthread_rwlock_trywrlock_with_caller(&entry->lock, file, line);
    else result = dt_pthread_rwlock_tryrdlock_with_caller(&entry->lock, file, line);
    if(result)
    { // need to give up mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
      g_usleep(5);
      goto restart;
    }
    // bubble up in lru list: move to the most-recently-used tail
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);

#ifdef _DEBUG
    // sanity check: in write mode we must be the writer, in read mode
    // we must not be
    const pthread_t writer = dt_pthread_rwlock_get_writer(&entry->lock);
    if(mode == 'w')
    {
      assert(pthread_equal(writer, pthread_self()));
    }
    else
    {
      assert(!pthread_equal(writer, pthread_self()));
    }
#endif

    if(mode == 'w')
    {
      // poison payload so ASAN catches reads before the writer
      // unpoisons the region it fills in
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

    // WARNING: do *NOT* unpoison here. it must be done by the caller!

    return entry;
  }

  // else, not found, need to allocate.

  // first try to clean up.
  // also wait if we can't free more than the requested fill ratio.
  if(cache->cost > 0.8f * cache->cost_quota)
  {
    // need to roll back all the way to get a consistent lock state:
    dt_cache_gc(cache, 0.8f);
  }

  // here dies your 32-bit system:
  dt_cache_entry_t *entry = (dt_cache_entry_t *)g_slice_alloc(sizeof(dt_cache_entry_t));
  int ret = dt_pthread_rwlock_init(&entry->lock, 0);
  if(ret) fprintf(stderr, "rwlock init: %d\n", ret);
  entry->data = 0;
  entry->data_size = cache->entry_size;
  entry->cost = 1;
  // single-element list node owned by the entry; spliced into cache->lru below
  entry->link = g_list_append(0, entry);
  entry->key = key;
  entry->_lock_demoting = 0;

  g_hash_table_insert(cache->hashtable, GINT_TO_POINTER(key), entry);

  // either a custom allocator is set, or entry_size must be non-zero
  // so we can allocate the payload ourselves:
  assert(cache->allocate || entry->data_size);

  if(cache->allocate)
    cache->allocate(cache->allocate_data, entry);
  else
    entry->data = dt_alloc_align(64, entry->data_size);

  assert(entry->data_size);
  ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);

  // if allocate callback is given, always return a write lock
  const int write = ((mode == 'w') || cache->allocate);

  // write lock in case the caller requests it:
  if(write) dt_pthread_rwlock_wrlock_with_caller(&entry->lock, file, line);
  else dt_pthread_rwlock_rdlock_with_caller(&entry->lock, file, line);

  cache->cost += entry->cost;

  // put at end of lru list (most recently used):
  cache->lru = g_list_concat(cache->lru, entry->link);

  dt_pthread_mutex_unlock(&cache->lock);
  // report unusually long waits (> 0.1 s) for contention debugging
  double end = dt_get_wtime();
  if(end - start > 0.1)
    fprintf(stderr, "wait time %.06fs\n", end - start);

  // WARNING: do *NOT* unpoison here. it must be done by the caller!

  return entry;
}
259
// remove the entry with the given key from the cache, blocking
// (retry with short sleeps) until a write lock on it can be acquired.
// returns 0 on success, 1 if the key is not cached.
int dt_cache_remove(dt_cache_t *cache, const uint32_t key)
{
  gpointer orig_key, value;
  gboolean res;
  int result;
  dt_cache_entry_t *entry;
restart:
  dt_pthread_mutex_lock(&cache->lock);

  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  entry = (dt_cache_entry_t *)value;
  if(!res)
  { // not found in cache, not deleting.
    dt_pthread_mutex_unlock(&cache->lock);
    return 1;
  }
  // need write lock to be able to delete:
  result = dt_pthread_rwlock_trywrlock(&entry->lock);
  if(result)
  {
    // someone else holds the entry lock; back off fully (drop the
    // cache mutex too) and retry so they can make progress:
    dt_pthread_mutex_unlock(&cache->lock);
    g_usleep(5);
    goto restart;
  }

  if(entry->_lock_demoting)
  {
    // oops, we are currently demoting (rw -> r) lock to this entry in some thread. do not touch!
    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_mutex_unlock(&cache->lock);
    g_usleep(5);
    goto restart;
  }

  // unlink from both index structures: hash table and LRU list
  gboolean removed = g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(key));
  (void)removed; // make non-assert compile happy
  assert(removed);
  cache->lru = g_list_delete_link(cache->lru, entry->link);

  if(cache->cleanup)
  {
    // unpoison before handing the payload back to the owner's cleanup:
    assert(entry->data_size);
    ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

    cache->cleanup(cache->cleanup_data, entry);
  }
  else
    dt_free_align(entry->data);

  dt_pthread_rwlock_unlock(&entry->lock);
  dt_pthread_rwlock_destroy(&entry->lock);
  cache->cost -= entry->cost;
  g_slice_free1(sizeof(*entry), entry);

  dt_pthread_mutex_unlock(&cache->lock);
  return 0;
}
318
// best-effort garbage collection. never blocks, never fails. well, sometimes it just doesn't free anything.
// evicts least-recently-used entries (front of cache->lru) until
// cache->cost drops below cost_quota * fill_ratio, skipping entries
// that are locked or mid lock-demotion.
// NOTE(review): this touches cache->lru and cache->cost without taking
// cache->lock; its caller here (dt_cache_get_with_caller) already holds
// it — presumably every caller must. verify before calling elsewhere.
void dt_cache_gc(dt_cache_t *cache, const float fill_ratio)
{
  GList *l = cache->lru;
  int cnt = 0;
  while(l)
  {
    cnt++;
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;
    assert(entry->link->data == entry);
    l = g_list_next(l); // we might remove this element, so walk to the next one while we still have the pointer..
    if(cache->cost < cache->cost_quota * fill_ratio) break;

    // if still locked by anyone else give up:
    if(dt_pthread_rwlock_trywrlock(&entry->lock)) continue;

    if(entry->_lock_demoting)
    {
      // oops, we are currently demoting (rw -> r) lock to this entry in some thread. do not touch!
      dt_pthread_rwlock_unlock(&entry->lock);
      continue;
    }

    // delete! unlink from hash table and LRU list, refund its cost:
    g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(entry->key));
    cache->lru = g_list_delete_link(cache->lru, entry->link);
    cache->cost -= entry->cost;

    if(cache->cleanup)
    {
      // unpoison before handing the payload back to the owner's cleanup:
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

      cache->cleanup(cache->cleanup_data, entry);
    }
    else
      dt_free_align(entry->data);

    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_rwlock_destroy(&entry->lock);
    g_slice_free1(sizeof(*entry), entry);
  }
}
362
// release the entry lock taken by dt_cache_get()/dt_cache_testget().
// under ASAN debug builds, re-poison the payload when we appear to be
// the last reader, so stale accesses after release are caught.
void dt_cache_release_with_caller(dt_cache_t *cache, dt_cache_entry_t *entry, const char *file, int line)
{
#if((__has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)) && 1)
  // yes, this is *HIGHLY* unportable and is accessing implementation details.
  // we peek at glibc's internal rwlock reader count; the member name and
  // nesting depend on glibc version (probed at configure time) and on
  // whether dt_pthread_rwlock_t wraps the pthread lock (_DEBUG builds).
#ifdef _DEBUG

# if defined(HAVE_THREAD_RWLOCK_ARCH_T_READERS)
  if (entry->lock.lock.__data.__readers <= 1)
# elif defined(HAVE_THREAD_RWLOCK_ARCH_T_NR_READERS)
  if (entry->lock.lock.__data.__nr_readers <= 1)
# else /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */
#  error "No valid reader member"
# endif /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */

#else /* _DEBUG */

# if defined(HAVE_THREAD_RWLOCK_ARCH_T_READERS)
  if (entry->lock.__data.__readers <= 1)
# elif defined(HAVE_THREAD_RWLOCK_ARCH_T_NR_READERS)
  if(entry->lock.__data.__nr_readers <= 1)
# else /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */
#  error "No valid reader member"
# endif /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */

#endif /* _DEBUG */
  {
    // only if there are no other readers we may poison.
    assert(entry->data_size);
    ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
  }
#endif

  dt_pthread_rwlock_unlock(&entry->lock);
}
397
398 // modelines: These editor modelines have been set for all relevant files by tools/update_modelines.sh
399 // vim: shiftwidth=2 expandtab tabstop=2 cindent
400 // kate: tab-indents: off; indent-width 2; replace-tabs on; indent-mode cstyle; remove-trailing-spaces modified;
401