/*	$NetBSD: qxl_object.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_object.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
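
/*
 * TTM destroy callback, invoked once the last reference to a BO is
 * gone: evict its surface if it has one, take it off the device's GEM
 * object list, release the GEM object, and free the wrapper.
 */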
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

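/*
 * Build the TTM placement list for a BO from its QXL GEM domain.
 * Pinned BOs are marked TTM_PL_FLAG_NO_EVICT; BOs no larger than a
 * page are placed top-down, presumably to keep them out of the way of
 * larger allocations.  Surface BOs may fall back from the private
 * surface aperture to VRAM, and an unrecognized domain ends up in
 * system memory.
 */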
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (pinned)
		pflag |= TTM_PL_FLAG_NO_EVICT;
	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

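/*
 * GEM object callbacks wired into every qxl BO; these dispatch GEM and
 * PRIME operations to the qxl- and TTM-specific helpers.
 */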
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

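/*
 * Create a qxl BO backed by TTM.  size is rounded up to whole pages;
 * kernel selects a kernel-internal BO that userspace cannot map;
 * pinned gives the BO an initial pin count of 1.  On success the new
 * BO is returned in *bo_ptr; on failure *bo_ptr is left NULL and a
 * negative errno is returned.
 */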
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		/*
		 * note: on failure, ttm_bo_init() is expected to release
		 * the BO via qxl_ttm_bo_destroy(), so it is not freed here
		 */
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

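/*
 * Map the whole BO into the kernel's address space.  The mapping is
 * reference counted: repeat calls just return the cached kptr, and
 * every successful call must be balanced by qxl_bo_kunmap().
 */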
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

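/*
 * Map a single page of the BO for a short-lived access that must not
 * sleep.  BOs in VRAM or the surface aperture go through the device's
 * atomic io_mapping; anything else falls back to a full qxl_bo_kmap().
 * Note that the return value of qxl_ttm_io_mem_reserve() is ignored
 * here.  Undo with qxl_bo_kunmap_atomic_page().
 */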
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

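/*
 * Drop one reference to the kernel mapping; the underlying TTM map is
 * torn down only when the count reaches zero.
 */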
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

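/*
 * Release a page obtained from qxl_bo_kmap_atomic_page(), taking the
 * same atomic-io-mapping or kmap path that the map call took.
 */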
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put_unlocked(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

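/*
 * Pin the BO in its current domain by revalidating it with
 * TTM_PL_FLAG_NO_EVICT set.  The caller must hold the BO reservation;
 * pins are counted, so only the first pin actually revalidates.
 */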
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, use the internal version __qxl_bo_pin() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, use the internal version __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

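/*
 * Release any GEM objects that userspace still holds when the device
 * is going away, logging each one.  This is a safety net; in normal
 * operation the list should already be empty.
 */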
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

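/*
 * Surface IDs are allocated lazily: the first time a surface BO is
 * used, reserve an ID and create the corresponding hardware surface.
 */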
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

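/*
 * Force TTM to evict every BO from the surface aperture (TTM_PL_PRIV)
 * or from VRAM, respectively.
 */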
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}