/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifdef ENABLE_SHADER_CACHE

#include <ctype.h>
#include <ftw.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <inttypes.h>

#include "util/crc32.h"
#include "util/debug.h"
#include "util/rand_xor.h"
#include "util/u_atomic.h"
#include "util/mesa-sha1.h"
#include "util/ralloc.h"
#include "util/compiler.h"

#include "disk_cache.h"
#include "disk_cache_os.h"

/* The cache version should be bumped whenever a change is made to the
 * structure of cache entries or the index. This will give any 3rd party
 * applications reading the cache entries a chance to adjust to the changes.
 *
 * - The cache version is checked internally when reading a cache entry. If we
 *   ever have a mismatch we are in big trouble as this means we had a cache
 *   collision. In case of such an event please check the skies for giant
 *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
 *
 * - There is no strict requirement that cache versions be backwards
 *   compatible but effort should be taken to limit disruption where possible.
 */
#define CACHE_VERSION 1

#define DRV_KEY_CPY(_dst, _src, _src_size) \
do {                                       \
   memcpy(_dst, _src, _src_size);          \
   _dst += _src_size;                      \
} while (0)

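/* Create a new disk cache object. gpu_name, driver_id and driver_flags are
 * baked into the driver_keys_blob, which is prepended to every key hash (see
 * disk_cache_compute_key()), so entries written by a different driver build,
 * GPU or flag combination never match. Returns NULL if the cache is disabled
 * or on allocation failure.
 */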
struct disk_cache *
disk_cache_create(const char *gpu_name, const char *driver_id,
                  uint64_t driver_flags)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *max_size_str;
   uint64_t max_size;

   uint8_t cache_version = CACHE_VERSION;
   size_t cv_size = sizeof(cache_version);

   if (!disk_cache_enabled())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   cache = rzalloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   /* Assume failure. */
   cache->path_init_failed = true;

#ifdef ANDROID
   /* Android needs the "disk cache" to be enabled for
    * EGL_ANDROID_blob_cache's callbacks to be called, but it doesn't actually
    * want any storing to disk to happen inside of the driver.
    */
   goto path_fail;
#endif


   char *path = disk_cache_generate_cache_dir(local, gpu_name, driver_id);
   if (!path)
      goto path_fail;

   cache->path = ralloc_strdup(cache, path);
   if (cache->path == NULL)
      goto path_fail;

   if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false)) {
      if (!disk_cache_load_cache_index(local, cache))
         goto path_fail;
   }

   if (!disk_cache_mmap_cache_index(local, cache, path))
      goto path_fail;

   max_size = 0;

   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");

   #ifdef MESA_GLSL_CACHE_MAX_SIZE
   if (!max_size_str) {
      max_size_str = MESA_GLSL_CACHE_MAX_SIZE;
   }
   #endif

   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }

   /* Default to 1GB for maximum cache size. */
   if (max_size == 0) {
      max_size = 1024*1024*1024;
   }

   cache->max_size = max_size;

   /* 4 threads were chosen below because just about all modern CPUs currently
    * available that run Mesa have *at least* 4 cores. For these CPUs allowing
    * more threads can result in the queue being processed faster, thus
    * avoiding excessive memory use due to a backlog of cache entries building
    * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
    * flag this should have little negative impact on low-core systems.
    *
    * The queue will resize automatically when it's full, so adding new jobs
    * doesn't stall.
    */
   if (!util_queue_init(&cache->cache_queue, "disk$", 32, 4,
                        UTIL_QUEUE_INIT_SCALE_THREADS |
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                        UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                        UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY, NULL))
      goto fail;

   cache->path_init_failed = false;

 path_fail:

   cache->driver_keys_blob_size = cv_size;

   /* Create driver id keys */
   size_t id_size = strlen(driver_id) + 1;
   size_t gpu_name_size = strlen(gpu_name) + 1;
   cache->driver_keys_blob_size += id_size;
   cache->driver_keys_blob_size += gpu_name_size;

   /* We sometimes store entire structs that contain pointers in the cache,
    * use pointer size as a key to avoid hard-to-debug issues.
    */
   uint8_t ptr_size = sizeof(void *);
   size_t ptr_size_size = sizeof(ptr_size);
   cache->driver_keys_blob_size += ptr_size_size;

   size_t driver_flags_size = sizeof(driver_flags);
   cache->driver_keys_blob_size += driver_flags_size;

   cache->driver_keys_blob =
      ralloc_size(cache, cache->driver_keys_blob_size);
   if (!cache->driver_keys_blob)
      goto fail;

   uint8_t *drv_key_blob = cache->driver_keys_blob;
   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size);
   DRV_KEY_CPY(drv_key_blob, driver_id, id_size);
   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size);
   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size);
   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size);

   /* Seed our rand function */
   s_rand_xorshift128plus(cache->seed_xorshift128plus, true);

   ralloc_free(local);

   return cache;

 fail:
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}

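/* Destroy a cache object: drain and tear down the background writer queue,
 * release the index mapping (and foz database when in use), then free all
 * memory owned by the cache. Safe to call on a cache whose path setup failed.
 */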
void
disk_cache_destroy(struct disk_cache *cache)
{
   if (cache && !cache->path_init_failed) {
      util_queue_finish(&cache->cache_queue);
      util_queue_destroy(&cache->cache_queue);

      if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false))
         foz_destroy(&cache->foz_db);

      disk_cache_destroy_mmap(cache);
   }

   ralloc_free(cache);
}

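/* Block until every job currently queued on the cache's writer threads has
 * completed.
 */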
void
disk_cache_wait_for_idle(struct disk_cache *cache)
{
   util_queue_finish(&cache->cache_queue);
}

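/* Remove the on-disk entry associated with the given key, if any. */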
void
disk_cache_remove(struct disk_cache *cache, const cache_key key)
{
   char *filename = disk_cache_get_cache_filename(cache, key);
   if (filename == NULL) {
      return;
   }

   disk_cache_evict_item(cache, filename);
}

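/* Allocate a job for the background writer queue. Unless take_ownership is
 * set, the payload is copied into the same allocation immediately after the
 * job struct; GLSL metadata keys, when present, are duplicated as well.
 */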
static struct disk_cache_put_job *
create_put_job(struct disk_cache *cache, const cache_key key,
               void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata,
               bool take_ownership)
{
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
      malloc(sizeof(struct disk_cache_put_job) + (take_ownership ? 0 : size));

   if (dc_job) {
      dc_job->cache = cache;
      memcpy(dc_job->key, key, sizeof(cache_key));
      if (take_ownership) {
         dc_job->data = data;
      } else {
         dc_job->data = dc_job + 1;
         memcpy(dc_job->data, data, size);
      }
      dc_job->size = size;

      /* Copy the cache item metadata */
      if (cache_item_metadata) {
         dc_job->cache_item_metadata.type = cache_item_metadata->type;
         if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
            dc_job->cache_item_metadata.num_keys =
               cache_item_metadata->num_keys;
            dc_job->cache_item_metadata.keys = (cache_key *)
               malloc(cache_item_metadata->num_keys * sizeof(cache_key));

            if (!dc_job->cache_item_metadata.keys)
               goto fail;

            memcpy(dc_job->cache_item_metadata.keys,
                   cache_item_metadata->keys,
                   sizeof(cache_key) * cache_item_metadata->num_keys);
         }
      } else {
         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
         dc_job->cache_item_metadata.keys = NULL;
      }
   }

   return dc_job;

fail:
   free(dc_job);

   return NULL;
}

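/* Free a put job along with any metadata keys duplicated by
 * create_put_job().
 */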
static void
destroy_put_job(void *job, void *gdata, int thread_index)
{
   if (job) {
      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
      free(dc_job->cache_item_metadata.keys);
      free(job);
   }
}

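/* Variant of destroy_put_job() for jobs that took ownership of the caller's
 * data buffer.
 */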
static void
destroy_put_job_nocopy(void *job, void *gdata, int thread_index)
{
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
   free(dc_job->data);
   destroy_put_job(job, gdata, thread_index);
}

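/* Writer-thread callback. For the single-file (foz) cache the entry is
 * appended to the database; otherwise up to 8 least-recently-used items are
 * evicted while the cache exceeds max_size, and the entry is then written to
 * its own file on disk.
 */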
static void
cache_put(void *job, void *gdata, int thread_index)
{
   assert(job);

   unsigned i = 0;
   char *filename = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false)) {
      disk_cache_write_item_to_disk_foz(dc_job);
   } else {
      filename = disk_cache_get_cache_filename(dc_job->cache, dc_job->key);
      if (filename == NULL)
         goto done;

      /* If the cache is too large, evict something else first. */
      while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
             i < 8) {
         disk_cache_evict_lru_item(dc_job->cache);
         i++;
      }

      disk_cache_write_item_to_disk(dc_job, filename);

done:
      free(filename);
   }
}

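/* Store data under the given key. If a blob_put_cb callback is installed it
 * is invoked synchronously; otherwise the data is copied and written to disk
 * asynchronously on the cache queue.
 */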
void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      return;
   }

   if (cache->path_init_failed)
      return;

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, (void*)data, size, cache_item_metadata, false);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job, dc_job->size);
   }
}

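/* Like disk_cache_put(), but takes ownership of the (heap-allocated) data
 * instead of copying it; the buffer is freed after the asynchronous write
 * completes, or immediately on the blob-callback and failed-init paths.
 */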
void
disk_cache_put_nocopy(struct disk_cache *cache, const cache_key key,
                      void *data, size_t size,
                      struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      free(data);
      return;
   }

   if (cache->path_init_failed) {
      free(data);
      return;
   }

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size, cache_item_metadata, true);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job_nocopy, dc_job->size);
   }
}

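/* Look up an item by key, via the blob callback when one is installed,
 * otherwise from the single-file (foz) database or the per-entry file on
 * disk. Returns NULL on a miss; on a hit the item's size is stored in *size
 * and the returned buffer belongs to the caller.
 */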
void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   if (size)
      *size = 0;

   if (cache->blob_get_cb) {
      /* This is what Android EGL defines as the maxValueSize in egl_cache_t
       * class implementation.
       */
      const signed long max_blob_size = 64 * 1024;
      void *blob = malloc(max_blob_size);
      if (!blob)
         return NULL;

      signed long bytes =
         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);

      if (!bytes) {
         free(blob);
         return NULL;
      }

      if (size)
         *size = bytes;
      return blob;
   }

   if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false)) {
      return disk_cache_load_item_foz(cache, key, size);
   } else {
      char *filename = disk_cache_get_cache_filename(cache, key);
      if (filename == NULL)
         return NULL;

      return disk_cache_load_item(cache, filename, size);
   }
}

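/* Record that a key has been stored by writing it into the stored_keys
 * table, at a slot picked from the first 32 bits of the key. Paired with
 * disk_cache_has_key() below.
 */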
void
disk_cache_put_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
      return;
   }

   if (cache->path_init_failed)
      return;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   memcpy(entry, key, CACHE_KEY_SIZE);
}

/* This function lets us test whether a given key was previously
 * stored in the cache with disk_cache_put_key(). The implementation is
 * efficient in that it does not use syscalls or hit the disk. It's not
 * race-free, but the races are benign. If we race with someone else
 * calling disk_cache_put_key, then that's just an extra cache miss and an
 * extra recompile.
 */
bool
disk_cache_has_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_get_cb) {
      uint32_t blob;
      return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
   }

   if (cache->path_init_failed)
      return false;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
}

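/* Compute a cache key: the SHA-1 of the driver_keys_blob (cache version,
 * driver id, GPU name, pointer size and driver flags) followed by the
 * caller-provided data.
 */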
void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
                     cache->driver_keys_blob_size);
   _mesa_sha1_update(&ctx, data, size);
   _mesa_sha1_final(&ctx, key);
}

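/* Install blob put/get callbacks (e.g. for EGL_ANDROID_blob_cache). When
 * set, they take the place of the on-disk storage paths in disk_cache_put()
 * and disk_cache_get().
 */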
void
disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
                         disk_cache_get_cb get)
{
   cache->blob_put_cb = put;
   cache->blob_get_cb = get;
}

#endif /* ENABLE_SHADER_CACHE */