// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"

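/*
 * Release a panthor GEM object: drop the mmap offset, destroy the gpuva
 * list lock, free the shmem backing, and finally put the reference we hold
 * on the exclusive VM root GEM, if any (drm_gem_object_put() accepts a
 * NULL pointer, so shared BOs are fine here).
 */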
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;

	drm_gem_free_mmap_offset(&bo->base.base);
	mutex_destroy(&bo->gpuva_list_lock);
	drm_gem_shmem_free(&bo->base);
	drm_gem_object_put(vm_root_gem);
}

/**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
	struct panthor_vm *vm;
	int ret;

	if (IS_ERR_OR_NULL(bo))
		return;

	vm = bo->vm;
	panthor_kernel_bo_vunmap(bo);

	if (drm_WARN_ON(bo->obj->dev,
			to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
		goto out_free_bo;

	ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
	if (ret)
		goto out_free_bo;

	panthor_vm_free_va(vm, &bo->va_node);
	drm_gem_object_put(bo->obj);

out_free_bo:
	panthor_vm_put(vm);
	kfree(bo);
}

/**
 * panthor_kernel_bo_create() - Create and map a GEM object to a VM
 * @ptdev: Device.
 * @vm: VM to map the GEM to. Must not be NULL (kernel BOs are always
 * GPU-mapped).
 * @size: Size of the buffer object.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
 * that are related to map operations).
 * @gpu_va: GPU address assigned when mapping to the VM.
 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
 * automatically allocated.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va)
{
	struct drm_gem_shmem_object *obj;
	struct panthor_kernel_bo *kbo;
	struct panthor_gem_object *bo;
	int ret;

	if (drm_WARN_ON(&ptdev->base, !vm))
		return ERR_PTR(-EINVAL);

	kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
	if (!kbo)
		return ERR_PTR(-ENOMEM);

	obj = drm_gem_shmem_create(&ptdev->base, size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free_bo;
	}

	bo = to_panthor_bo(&obj->base);
	kbo->obj = &obj->base;
	bo->flags = bo_flags;

	/* The system and GPU MMU page sizes might differ, which becomes a
	 * problem for FW sections that need to be mapped at an explicit
	 * address, since our PAGE_SIZE alignment might cover a VA range
	 * that's expected to be used for another section (e.g. with 64K CPU
	 * pages and 4K GPU pages, PAGE_SIZE-aligning a 4K section would
	 * claim VA space reserved for whatever is mapped right after it).
	 * Make sure we never map more than we need.
	 */
	size = ALIGN(size, panthor_vm_page_size(vm));
	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
	if (ret)
		goto err_put_obj;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
	if (ret)
		goto err_free_va;

	kbo->vm = panthor_vm_get(vm);
	bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
	drm_gem_object_get(bo->exclusive_vm_root_gem);
	bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	return kbo;

err_free_va:
	panthor_vm_free_va(vm, &kbo->va_node);

err_put_obj:
	drm_gem_object_put(&obj->base);

err_free_bo:
	kfree(kbo);
	return ERR_PTR(ret);
}
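
/*
 * Illustrative usage (not part of the driver): an internal caller creating
 * a private, GPU-mapped scratch buffer might look like the sketch below.
 * The SZ_4K size and the NOEXEC map flag are arbitrary choices for the
 * example, not requirements of the API.
 *
 *	struct panthor_kernel_bo *kbo;
 *
 *	kbo = panthor_kernel_bo_create(ptdev, vm, SZ_4K,
 *				       DRM_PANTHOR_BO_NO_MMAP,
 *				       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *				       PANTHOR_VM_KERNEL_AUTO_VA);
 *	if (IS_ERR(kbo))
 *		return PTR_ERR(kbo);
 *
 * The buffer is later released with panthor_kernel_bo_destroy(kbo), which
 * unmaps it from the VM and drops the references taken here.
 */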

static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);

	/* Don't allow mmap on objects that have the NO_MMAP flag set. */
	if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
		return -EINVAL;

	return drm_gem_shmem_object_mmap(obj, vma);
}

static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	/* We can't export GEMs that have an exclusive VM. */
	if (to_panthor_bo(obj)->exclusive_vm_root_gem)
		return ERR_PTR(-EINVAL);

	return drm_gem_prime_export(obj, flags);
}

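/*
 * GEM object ops: mostly the generic shmem helpers, with .mmap and .export
 * wrapped so we can reject mmap on DRM_PANTHOR_BO_NO_MMAP objects and
 * refuse to export BOs that are bound to an exclusive VM.
 */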
static const struct drm_gem_object_funcs panthor_gem_funcs = {
	.free = panthor_gem_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = panthor_gem_mmap,
	.export = panthor_gem_prime_export,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panthor_gem_create_object - Implementation of driver->gem_create_object.
 * @ddev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &panthor_gem_funcs;
	/* Use write-combined CPU mappings unless the device is IO coherent. */
	obj->base.map_wc = !ptdev->coherent;
	mutex_init(&obj->gpuva_list_lock);
	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);

	return &obj->base.base;
}

/**
 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
 * @file: DRM file.
 * @ddev: DRM device.
 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
 * @size: Size of the GEM object to allocate. Updated with the actual
 * (page-aligned) size of the GEM object on success.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Pointer holding the handle pointing to the new GEM object.
 *
 * Return: Zero on success, a negative error code otherwise.
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panthor_gem_object *bo;

	shmem = drm_gem_shmem_create(ddev, *size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	bo = to_panthor_bo(&shmem->base);
	bo->flags = flags;

	if (exclusive_vm) {
		bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
		drm_gem_object_get(bo->exclusive_vm_root_gem);
		bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	}

	/*
	 * Allocate a handle (an id in the file's GEM handle table) that
	 * userspace will use to refer to the object.
	 */
	ret = drm_gem_handle_create(file, &shmem->base, handle);
	if (!ret)
		*size = bo->base.base.size;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}
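
/*
 * Illustrative caller (sketch, not taken verbatim from the driver): the
 * BO_CREATE ioctl path is expected to resolve the exclusive VM (if the
 * user asked for one) and then do something like
 *
 *	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
 *					     args->flags, &args->handle);
 *
 * passing &args->size so the page-aligned size picked by
 * drm_gem_shmem_create() is reported back to userspace. The handle is the
 * only reference that survives the call; the local one is dropped above.
 */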