/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int __qxl_bo_pin(struct qxl_bo *bo);
static void __qxl_bo_unpin(struct qxl_bo *bo);

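/*
 * TTM destroy callback, invoked once the last reference to the underlying
 * ttm_buffer_object is dropped: evict any hardware surface backing the BO,
 * take it off the device's GEM object list and free it.
 */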
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

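/* Check whether @bo was allocated by this driver, i.e. is a qxl_bo. */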
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

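/*
 * Translate a QXL GEM domain into a TTM placement list.  BOs of one page
 * or less are placed top-down; surface BOs may fall back from the surface
 * aperture (TTM_PL_PRIV) to VRAM, and an unrecognized domain falls back
 * to system memory.
 */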
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

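/* GEM object callbacks shared by every qxl BO. */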
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

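/*
 * Allocate a new qxl_bo of at least @size bytes (rounded up to whole
 * pages) in the given domain.  Validation is interruptible for userspace
 * ("device") BOs and uninterruptible for kernel ones.  On success the BO
 * is returned unreserved through @bo_ptr, pinned if @pinned was set.
 */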
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

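/*
 * Map the whole BO into kernel address space.  The caller must hold the
 * BO's reservation lock.  Mappings are refcounted through map_count, and
 * the BO stays pinned for as long as a mapping exists.
 */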
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = __qxl_bo_pin(bo);
	if (r)
		return r;

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r) {
		__qxl_bo_unpin(bo);
		return r;
	}
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

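/* Reserving wrapper around qxl_bo_vmap_locked(). */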
int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_vmap_locked(bo, map);
	qxl_bo_unreserve(bo);
	return r;
}

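/*
 * Map a single page of the BO.  For VRAM and surface memory this returns
 * an atomic io_mapping of the aperture; anything else falls back to a
 * (non-atomic) vmap of the whole object.  Returns NULL if the fallback
 * mapping fails.
 */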
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

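/*
 * Drop one reference to the mapping set up by qxl_bo_vmap_locked(); the
 * actual vunmap and unpin only happen when the last reference goes away.
 * The caller must hold the BO's reservation lock.
 */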
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
	__qxl_bo_unpin(bo);
}

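/* Reserving wrapper around qxl_bo_vunmap_locked(). */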
int qxl_bo_vunmap(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

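/*
 * Undo qxl_bo_kmap_atomic_page().  @pmap is only consumed in the atomic
 * io_mapping case; the fallback path instead drops the vmap reference
 * taken when the page was mapped.
 */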
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

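/* Drop a reference to @*bo and clear the pointer.  NULL-safe. */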
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

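/* Take an additional reference to @bo and return it. */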
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

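/*
 * Pin @bo in its preferred domain.  Expects the BO to be reserved; if it
 * is already pinned, only the pin count is bumped.  Validation here is
 * uninterruptible.
 */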
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

static void __qxl_bo_unpin(struct qxl_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_pin() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	__qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

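/*
 * Driver-teardown helper: complain about any GEM objects userspace still
 * holds and drop them, forcing the underlying TTM BOs to be freed.
 */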
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

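/* Device-wide BO setup/teardown; both just forward to the TTM layer. */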
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

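/*
 * Make sure a surface BO has a surface id and a matching hardware
 * surface, allocating both on first use.  No-op for non-surface BOs.
 */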
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

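/* Evict everything from the surface aperture (TTM_PL_PRIV). */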
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

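/* Evict everything from VRAM. */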
int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}