/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include "pvr_device_info.h"
#include "pvr_private.h"
#include "util/blob.h"
#include "vk_log.h"
#include "vk_object.h"
#include "vulkan/util/vk_util.h"

static void pvr_pipeline_cache_load(struct pvr_pipeline_cache *cache,
                                    const void *data,
                                    size_t size)
{
   struct pvr_device *device = cache->device;
   struct pvr_physical_device *pdevice = device->pdevice;
   struct vk_pipeline_cache_header header;
   struct blob_reader blob;

   blob_reader_init(&blob, data, size);

   blob_copy_bytes(&blob, &header, sizeof(header));
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != VK_VENDOR_ID_IMAGINATION)
      return;
   if (header.device_id != pdevice->dev_info.ident.device_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   /* TODO: There isn't currently any cached data so there's nothing to load
    * at this point. Once there is something to load then load it here (see
    * the sketch after this function for one possible shape).
    */
}
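
/* The TODO above is where entry loading would resume once the driver caches
 * compiled pipeline data. The sketch below (kept out of the build) shows one
 * possible shape, assuming a layout of a u32 entry count followed by
 * size-prefixed binaries; that layout and the pvr_pipeline_cache_insert()
 * helper are illustrative assumptions, not part of the driver today. Only the
 * util/blob.h reader API used here is real.
 */
#if 0
static void pvr_pipeline_cache_load_entries(struct pvr_pipeline_cache *cache,
                                            struct blob_reader *blob)
{
   /* Number of serialized entries following the Vulkan cache header. */
   const uint32_t num_entries = blob_read_uint32(blob);

   for (uint32_t i = 0; i < num_entries; i++) {
      const uint32_t entry_size = blob_read_uint32(blob);
      const void *entry_data = blob_read_bytes(blob, entry_size);

      /* A truncated or corrupt blob simply stops the load; the cache then
       * starts out empty, which is always valid.
       */
      if (blob->overrun)
         return;

      /* Hypothetical helper: hash the entry and add it to the cache's lookup
       * structure.
       */
      pvr_pipeline_cache_insert(cache, entry_data, entry_size);
   }
}
#endif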

VkResult pvr_CreatePipelineCache(VkDevice _device,
                                 const VkPipelineCacheCreateInfo *pCreateInfo,
                                 const VkAllocationCallbacks *pAllocator,
                                 VkPipelineCache *pPipelineCache)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   struct pvr_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_object_alloc(&device->vk,
                           pAllocator,
                           sizeof(*cache),
                           VK_OBJECT_TYPE_PIPELINE_CACHE);
   if (!cache)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   cache->device = device;

   if (pCreateInfo->initialDataSize > 0) {
      pvr_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);
   }

   *pPipelineCache = pvr_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void pvr_DestroyPipelineCache(VkDevice _device,
                              VkPipelineCache _cache,
                              const VkAllocationCallbacks *pAllocator)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   PVR_FROM_HANDLE(pvr_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   vk_object_free(&device->vk, pAllocator, cache);
}

VkResult pvr_GetPipelineCacheData(VkDevice _device,
                                  VkPipelineCache _cache,
                                  size_t *pDataSize,
                                  void *pData)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   struct pvr_physical_device *pdevice = device->pdevice;
   struct blob blob;

   if (pData)
      blob_init_fixed(&blob, pData, *pDataSize);
   else
      blob_init_fixed(&blob, NULL, SIZE_MAX);

   struct vk_pipeline_cache_header header = {
      .header_size = sizeof(struct vk_pipeline_cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = VK_VENDOR_ID_IMAGINATION,
      .device_id = pdevice->dev_info.ident.device_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   /* TODO: Once there's some data to cache then this should be written to
    * 'blob' (see the sketch after this function for one possible shape).
    */

   /* The header did not fit in the application-provided buffer. The Vulkan
    * spec requires VK_INCOMPLETE rather than VK_SUCCESS when the supplied
    * size is too small for the available data.
    */
   if (blob.out_of_memory) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   *pDataSize = blob.size;

   blob_finish(&blob);

   return VK_SUCCESS;
}
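
/* Counterpart sketch to the loading sketch above (also kept out of the
 * build): once the cache holds entries, they could be appended to 'blob'
 * after the header as a u32 count followed by size-prefixed binaries. The
 * 'num_entries'/'entries' fields and the entry struct are hypothetical;
 * struct pvr_pipeline_cache has no such storage yet. Only the util/blob.h
 * writer API used here is real.
 */
#if 0
static void pvr_pipeline_cache_write_entries(struct pvr_pipeline_cache *cache,
                                             struct blob *blob)
{
   blob_write_uint32(blob, cache->num_entries); /* hypothetical field */

   for (uint32_t i = 0; i < cache->num_entries; i++) {
      const struct pvr_pipeline_cache_entry *entry = cache->entries[i];

      /* Size-prefix each binary so the loader can walk the stream without
       * knowing entry sizes up front.
       */
      blob_write_uint32(blob, entry->size);
      blob_write_bytes(blob, entry->data, entry->size);
   }
}
#endif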

VkResult pvr_MergePipelineCaches(VkDevice _device,
                                 VkPipelineCache destCache,
                                 uint32_t srcCacheCount,
                                 const VkPipelineCache *pSrcCaches)
{
   /* TODO: Once there's some data to cache then this will need to be able to
    * merge caches together (see the sketch after this function for one
    * possible shape).
    */

   return VK_SUCCESS;
}
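
/* Sketch of the eventual merge path (kept out of the build): each source
 * cache's entries would be copied into the destination, skipping duplicates.
 * pvr_pipeline_cache_copy_entries() is a hypothetical helper building on the
 * entry storage assumed in the sketches above; only PVR_FROM_HANDLE() and
 * the entry-point signature are real.
 */
#if 0
VkResult pvr_MergePipelineCaches(VkDevice _device,
                                 VkPipelineCache destCache,
                                 uint32_t srcCacheCount,
                                 const VkPipelineCache *pSrcCaches)
{
   PVR_FROM_HANDLE(pvr_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      PVR_FROM_HANDLE(pvr_pipeline_cache, src, pSrcCaches[i]);

      /* Hypothetical: copy entries not already present (keyed by hash) from
       * 'src' into 'dst'.
       */
      pvr_pipeline_cache_copy_entries(dst, src);
   }

   return VK_SUCCESS;
}
#endif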