xref: /linux/drivers/gpu/drm/panfrost/panfrost_gem.c (revision 9a6b55ac)
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/*
 * Called by the DRM core when the last userspace/kernel reference to
 * the BO is dropped.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

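	/*
	 * Heap BOs have their backing pages faulted in one 2MB chunk at a
	 * time, with one sg_table per chunk; unmap and free every chunk
	 * that was actually populated.
	 */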
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
					     bo->sgts[i].nents, DMA_BIDIRECTIONAL);
				sg_free_table(&bo->sgts[i]);
			}
		}
		/* bo->sgts is allocated with kvmalloc_array(), so use kvfree() */
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free_object(obj);
}

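/*
 * Called by the DRM core when a GEM handle to the BO is created in a
 * DRM file: reserves a GPU VA range in that file's address space and,
 * unless the BO is a heap (mapped on demand from the fault handler),
 * maps it through the MMU right away.
 */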
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24 bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
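	/*
	 * For example, a 64KB executable BO is aligned to 64KB (16 pages),
	 * so its mapping occupies one naturally aligned 64KB block and can
	 * never straddle a 16MB boundary.
	 */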
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	bo->mmu = &priv->mmu;
	spin_lock(&priv->mm_lock);
	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&priv->mm_lock);
	if (ret)
		return ret;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(bo);
		if (ret) {
			spin_lock(&priv->mm_lock);
			drm_mm_remove_node(&bo->node);
			spin_unlock(&priv->mm_lock);
		}
	}
	return ret;
}

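/*
 * Called by the DRM core when a GEM handle to the BO is dropped from a
 * DRM file: tears down the MMU mapping (if any) and releases the BO's
 * GPU VA range in that file's address space.
 */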
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_file_priv *priv = file_priv->driver_priv;

	if (bo->is_mapped)
		panfrost_mmu_unmap(bo);

	spin_lock(&priv->mm_lock);
	if (drm_mm_node_allocated(&bo->node))
		drm_mm_remove_node(&bo->node);
	spin_unlock(&priv->mm_lock);
}

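/*
 * Heap BOs are only partially backed, with pages faulted in on demand,
 * so there is no complete backing store to pin for export or vmap.
 */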
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	if (to_panfrost_bo(obj)->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->base.base.funcs = &panfrost_gem_funcs;

	return &obj->base.base;
}

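/*
 * Called from the CREATE_BO ioctl path: allocates a shmem-backed BO,
 * applies the PANFROST_BO_* flags, and publishes the object to
 * userspace as a handle, which then owns the reference.
 */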
struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	/*
	 * Allocate an ID in the file's handle IDR and register the object
	 * under it; the returned handle is the ID userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

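/*
 * Buffers imported through PRIME are never mapped executable on the
 * GPU; only natively allocated BOs go through the alignment rule used
 * above for executable mappings.
 */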
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}