xref: /dragonfly/sys/dev/drm/radeon/radeon_cs.c (revision 65cc0652)
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27 #include <linux/list_sort.h>
28 #include <drm/drmP.h>
29 #include <uapi_drm/radeon_drm.h>
30 #include "radeon_reg.h"
31 #include "radeon.h"
32 #ifdef TRACE_TODO
33 #include "radeon_trace.h"
34 #endif
35 
36 #define RADEON_CS_MAX_PRIORITY		32u
37 #define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
38 
39 /* This is based on the bucket sort with O(n) time complexity.
40  * An item with priority "i" is added to bucket[i]. The lists are then
41  * concatenated in descending order.
42  */
43 struct radeon_cs_buckets {
44 	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
45 };
46 
47 static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
48 {
49 	unsigned i;
50 
51 	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
52 		INIT_LIST_HEAD(&b->bucket[i]);
53 }
54 
55 static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
56 				  struct list_head *item, unsigned priority)
57 {
58 	/* Since buffers which appear sooner in the relocation list are
59 	 * likely to be used more often than buffers which appear later
60 	 * in the list, the sort mustn't change the ordering of buffers
61 	 * with the same priority, i.e. it must be stable.
62 	 */
63 	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
64 }
65 
66 static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
67 				       struct list_head *out_list)
68 {
69 	unsigned i;
70 
71 	/* Connect the sorted buckets in the output list. */
72 	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
73 		list_splice(&b->bucket[i], out_list);
74 	}
75 }
76 
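/**
 * radeon_cs_parser_relocs() - build and validate the buffer list for a CS
 * @p:	parser structure holding parsing context.
 *
 * Walk the relocation chunk, look up the GEM object for every unique handle,
 * pick the preferred/allowed domains and a priority for each buffer, sort the
 * buffers into the validated list and validate them (together with the VM
 * buffers when the CS uses a VM).
 **/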
77 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
78 {
79 	struct radeon_cs_chunk *chunk;
80 	struct radeon_cs_buckets buckets;
81 	unsigned i, j;
82 	bool duplicate;
83 
84 	if (p->chunk_relocs_idx == -1) {
85 		return 0;
86 	}
87 	chunk = &p->chunks[p->chunk_relocs_idx];
88 	p->dma_reloc_idx = 0;
89 	/* FIXME: we assume that each reloc uses 4 dwords */
90 	p->nrelocs = chunk->length_dw / 4;
91 	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
92 	if (p->relocs_ptr == NULL) {
93 		return -ENOMEM;
94 	}
95 	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
96 	if (p->relocs == NULL) {
97 		return -ENOMEM;
98 	}
99 
100 	radeon_cs_buckets_init(&buckets);
101 
102 	for (i = 0; i < p->nrelocs; i++) {
103 		struct drm_radeon_cs_reloc *r;
104 		unsigned priority;
105 
106 		duplicate = false;
107 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
108 		for (j = 0; j < i; j++) {
109 			if (r->handle == p->relocs[j].handle) {
110 				p->relocs_ptr[i] = &p->relocs[j];
111 				duplicate = true;
112 				break;
113 			}
114 		}
115 		if (duplicate) {
116 			p->relocs[i].handle = 0;
117 			continue;
118 		}
119 
120 		p->relocs[i].gobj = drm_gem_object_lookup(p->filp, r->handle);
121 		if (p->relocs[i].gobj == NULL) {
122 			DRM_ERROR("gem object lookup failed 0x%x\n",
123 				  r->handle);
124 			return -ENOENT;
125 		}
126 		p->relocs_ptr[i] = &p->relocs[i];
127 		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
128 
129 		/* The userspace buffer priorities are from 0 to 15. A higher
130 		 * number means the buffer is more important.
131 		 * Also, the buffers used for write have a higher priority than
132 		 * the buffers used for read only, which doubles the range
133 		 * to 0 to 31. 32 is reserved for the kernel driver.
134 		 */
135 		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
136 			   + !!r->write_domain;
137 
138 		/* the first reloc of an UVD job is the msg and that must be in
139 		   VRAM, also put everything into VRAM on AGP cards and older
140 		   IGP chips to avoid image corruption */
141 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
142 		    (i == 0 || (p->rdev->flags & RADEON_IS_AGP) ||
143 		     p->rdev->family == CHIP_RS780 ||
144 		     p->rdev->family == CHIP_RS880)) {
145 
146 			/* TODO: is this still needed for NI+ ? */
147 			p->relocs[i].prefered_domains =
148 				RADEON_GEM_DOMAIN_VRAM;
149 
150 			p->relocs[i].allowed_domains =
151 				RADEON_GEM_DOMAIN_VRAM;
152 
153 			/* prioritize this over any other relocation */
154 			priority = RADEON_CS_MAX_PRIORITY;
155 		} else {
156 			uint32_t domain = r->write_domain ?
157 				r->write_domain : r->read_domains;
158 
159 			if (domain & RADEON_GEM_DOMAIN_CPU) {
160 				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
161 					  "for command submission\n");
162 				return -EINVAL;
163 			}
164 
165 			p->relocs[i].prefered_domains = domain;
166 			if (domain == RADEON_GEM_DOMAIN_VRAM)
167 				domain |= RADEON_GEM_DOMAIN_GTT;
168 			p->relocs[i].allowed_domains = domain;
169 		}
170 
171 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
172 		p->relocs[i].handle = r->handle;
173 
174 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
175 				      priority);
176 	}
177 
178 	radeon_cs_buckets_get_list(&buckets, &p->validated);
179 
180 	if (p->cs_flags & RADEON_CS_USE_VM)
181 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
182 					      &p->validated);
183 
184 	return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
185 }
186 
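/**
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 * @p:		parser structure holding parsing context.
 * @ring:	RADEON_CS_RING_* value from the FLAGS chunk.
 * @priority:	requested priority, used to pick between paired rings.
 *
 * Select the hardware ring based on the ASIC family; returns -EINVAL for
 * unknown ring ids or rings the ASIC doesn't have.
 **/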
187 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
188 {
189 	p->priority = priority;
190 
191 	switch (ring) {
192 	default:
193 		DRM_ERROR("unknown ring id: %d\n", ring);
194 		return -EINVAL;
195 	case RADEON_CS_RING_GFX:
196 		p->ring = RADEON_RING_TYPE_GFX_INDEX;
197 		break;
198 	case RADEON_CS_RING_COMPUTE:
199 		if (p->rdev->family >= CHIP_TAHITI) {
200 			if (p->priority > 0)
201 				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
202 			else
203 				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
204 		} else
205 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
206 		break;
207 	case RADEON_CS_RING_DMA:
208 		if (p->rdev->family >= CHIP_CAYMAN) {
209 			if (p->priority > 0)
210 				p->ring = R600_RING_TYPE_DMA_INDEX;
211 			else
212 				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
213 		} else if (p->rdev->family >= CHIP_RV770) {
214 			p->ring = R600_RING_TYPE_DMA_INDEX;
215 		} else {
216 			return -EINVAL;
217 		}
218 		break;
219 	case RADEON_CS_RING_UVD:
220 		p->ring = R600_RING_TYPE_UVD_INDEX;
221 		break;
222 	case RADEON_CS_RING_VCE:
223 		/* TODO: only use the low priority ring for now */
224 		p->ring = TN_RING_TYPE_VCE1_INDEX;
225 		break;
226 	}
227 	return 0;
228 }
229 
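/* Make the IB wait for the current fence of every relocated buffer. */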
230 static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
231 {
232 	int i;
233 
234 	for (i = 0; i < p->nrelocs; i++) {
235 		if (!p->relocs[i].robj)
236 			continue;
237 
238 		radeon_semaphore_sync_to(p->ib.semaphore,
239 					 p->relocs[i].robj->tbo.sync_obj);
240 	}
241 }
242 
243 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
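/**
 * radeon_cs_parser_init() - copy the CS chunks in from userspace
 * @p:		parser structure holding parsing context.
 * @data:	ioctl argument (struct drm_radeon_cs).
 *
 * Copy the chunk array and the chunk headers from userspace, remember the
 * indices of the IB, CONST_IB, RELOCS and FLAGS chunks, copy the chunk data
 * the kernel needs and, on KMS, pick and sanity check the target ring.
 **/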
244 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
245 {
246 	struct drm_radeon_cs *cs = data;
247 	uint64_t *chunk_array_ptr;
248 	unsigned size, i;
249 	u32 ring = RADEON_CS_RING_GFX;
250 	s32 priority = 0;
251 
252 	if (!cs->num_chunks) {
253 		return 0;
254 	}
255 	/* get chunks */
256 	INIT_LIST_HEAD(&p->validated);
257 	p->idx = 0;
258 	p->ib.sa_bo = NULL;
259 	p->ib.semaphore = NULL;
260 	p->const_ib.sa_bo = NULL;
261 	p->const_ib.semaphore = NULL;
262 	p->chunk_ib_idx = -1;
263 	p->chunk_relocs_idx = -1;
264 	p->chunk_flags_idx = -1;
265 	p->chunk_const_ib_idx = -1;
266 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
267 	if (p->chunks_array == NULL) {
268 		return -ENOMEM;
269 	}
270 	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
271 	if (copy_from_user(p->chunks_array, chunk_array_ptr,
272 			       sizeof(uint64_t)*cs->num_chunks)) {
273 		return -EFAULT;
274 	}
275 	p->cs_flags = 0;
276 	p->nchunks = cs->num_chunks;
277 	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
278 	if (p->chunks == NULL) {
279 		return -ENOMEM;
280 	}
281 	for (i = 0; i < p->nchunks; i++) {
282 		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
283 		struct drm_radeon_cs_chunk user_chunk;
284 		uint32_t __user *cdata;
285 
286 		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
287 		if (copy_from_user(&user_chunk, chunk_ptr,
288 				       sizeof(struct drm_radeon_cs_chunk))) {
289 			return -EFAULT;
290 		}
291 		p->chunks[i].length_dw = user_chunk.length_dw;
292 		p->chunks[i].chunk_id = user_chunk.chunk_id;
293 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
294 			p->chunk_relocs_idx = i;
295 		}
296 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
297 			p->chunk_ib_idx = i;
298 			/* zero length IB isn't useful */
299 			if (p->chunks[i].length_dw == 0)
300 				return -EINVAL;
301 		}
302 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
303 			p->chunk_const_ib_idx = i;
304 			/* zero length CONST IB isn't useful */
305 			if (p->chunks[i].length_dw == 0)
306 				return -EINVAL;
307 		}
308 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
309 			p->chunk_flags_idx = i;
310 			/* zero length flags aren't useful */
311 			if (p->chunks[i].length_dw == 0)
312 				return -EINVAL;
313 		}
314 
315 		size = p->chunks[i].length_dw;
316 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
317 		p->chunks[i].user_ptr = cdata;
318 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
319 			continue;
320 
321 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
322 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
323 				continue;
324 		}
325 
326 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
327 		size *= sizeof(uint32_t);
328 		if (p->chunks[i].kdata == NULL) {
329 			return -ENOMEM;
330 		}
331 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
332 			return -EFAULT;
333 		}
334 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
335 			p->cs_flags = p->chunks[i].kdata[0];
336 			if (p->chunks[i].length_dw > 1)
337 				ring = p->chunks[i].kdata[1];
338 			if (p->chunks[i].length_dw > 2)
339 				priority = (s32)p->chunks[i].kdata[2];
340 		}
341 	}
342 
343 	/* these are KMS only */
344 	if (p->rdev) {
345 		if ((p->cs_flags & RADEON_CS_USE_VM) &&
346 		    !p->rdev->vm_manager.enabled) {
347 			DRM_ERROR("VM not active on asic!\n");
348 			return -EINVAL;
349 		}
350 
351 		if (radeon_cs_get_ring(p, ring, priority))
352 			return -EINVAL;
353 
354 		/* we only support VM on some SI+ rings */
355 		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
356 			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
357 				DRM_ERROR("Ring %d requires VM!\n", p->ring);
358 				return -EINVAL;
359 			}
360 		} else {
361 			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
362 				DRM_ERROR("VM not supported on ring %d!\n",
363 					  p->ring);
364 				return -EINVAL;
365 			}
366 		}
367 	}
368 
369 	return 0;
370 }
371 
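/* list_sort() comparator: order buffers by size, smallest first. */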
372 static int cmp_size_smaller_first(void *priv, struct list_head *a,
373 				  struct list_head *b)
374 {
375 	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
376 	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
377 
378 	/* Sort A before B if A is smaller. */
379 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
380 }
381 
382 /**
383  * radeon_cs_parser_fini() - clean parser states
384  * @parser:	parser structure holding parsing context.
385  * @error:	error number
 * @backoff:	indicator to backoff the reservation
386  *
387  * If error is set, then unvalidate the buffers; otherwise just free the
388  * memory used by the parsing context.
389  **/
390 static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
391 {
392 	unsigned i;
393 
394 	if (!error) {
395 		/* Sort the buffer list from the smallest to largest buffer,
396 		 * which affects the order of buffers in the LRU list.
397 		 * This assures that the smallest buffers are added first
398 		 * to the LRU list, so they are likely to be later evicted
399 		 * first, instead of large buffers whose eviction is more
400 		 * expensive.
401 		 *
402 		 * This slightly lowers the number of bytes moved by TTM
403 		 * per frame under memory pressure.
404 		 */
405 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
406 
407 		ttm_eu_fence_buffer_objects(&parser->ticket,
408 					    &parser->validated,
409 					    parser->ib.fence);
410 	} else if (backoff) {
411 		ttm_eu_backoff_reservation(&parser->ticket,
412 					   &parser->validated);
413 	}
414 
415 	if (parser->relocs != NULL) {
416 		for (i = 0; i < parser->nrelocs; i++) {
417 			if (parser->relocs[i].gobj)
418 				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
419 		}
420 	}
421 	kfree(parser->track);
422 	kfree(parser->relocs);
423 	kfree(parser->relocs_ptr);
424 	drm_free_large(parser->vm_bos);
425 	for (i = 0; i < parser->nchunks; i++)
426 		drm_free_large(parser->chunks[i].kdata);
427 	kfree(parser->chunks);
428 	kfree(parser->chunks_array);
429 	radeon_ib_free(parser->rdev, &parser->ib);
430 	radeon_ib_free(parser->rdev, &parser->const_ib);
431 }
432 
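/**
 * radeon_cs_ib_chunk() - check and schedule the IB, non-VM path
 * @rdev:	radeon device.
 * @parser:	parser structure holding parsing context.
 *
 * For command streams that don't use a VM, run the per-ring packet checker
 * (cs_parse) over the IB, sync with the buffer fences and schedule the IB.
 **/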
433 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
434 			      struct radeon_cs_parser *parser)
435 {
436 	int r;
437 
438 	if (parser->chunk_ib_idx == -1)
439 		return 0;
440 
441 	if (parser->cs_flags & RADEON_CS_USE_VM)
442 		return 0;
443 
444 	r = radeon_cs_parse(rdev, parser->ring, parser);
445 	if (r || parser->parser_error) {
446 		DRM_ERROR("Invalid command stream !\n");
447 		return r;
448 	}
449 
450 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
451 		radeon_uvd_note_usage(rdev);
452 	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
453 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
454 		radeon_vce_note_usage(rdev);
455 
456 	radeon_cs_sync_rings(parser);
457 	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
458 	if (r) {
459 		DRM_ERROR("Failed to schedule IB !\n");
460 	}
461 	return r;
462 }
463 
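/**
 * radeon_bo_vm_update_pte() - bring the VM page tables up to date for a CS
 * @p:	parser structure holding parsing context.
 * @vm:	VM the command stream will run in.
 *
 * Update the page directory, the mapping of the ring temporary BO and the
 * page table entries of every buffer referenced by the CS, and clean up
 * freed and invalidated mappings along the way.
 **/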
464 static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
465 				   struct radeon_vm *vm)
466 {
467 	struct radeon_device *rdev = p->rdev;
468 	struct radeon_bo_va *bo_va;
469 	int i, r;
470 
471 	r = radeon_vm_update_page_directory(rdev, vm);
472 	if (r)
473 		return r;
474 
475 	r = radeon_vm_clear_freed(rdev, vm);
476 	if (r)
477 		return r;
478 
479 	if (vm->ib_bo_va == NULL) {
480 		DRM_ERROR("Tmp BO not in VM!\n");
481 		return -EINVAL;
482 	}
483 
484 	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
485 				&rdev->ring_tmp_bo.bo->tbo.mem);
486 	if (r)
487 		return r;
488 
489 	for (i = 0; i < p->nrelocs; i++) {
490 		struct radeon_bo *bo;
491 
492 		/* ignore duplicates */
493 		if (p->relocs_ptr[i] != &p->relocs[i])
494 			continue;
495 
496 		bo = p->relocs[i].robj;
497 		bo_va = radeon_vm_bo_find(vm, bo);
498 		if (bo_va == NULL) {
499 			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
500 			return -EINVAL;
501 		}
502 
503 		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
504 		if (r)
505 			return r;
506 	}
507 
508 	return radeon_vm_clear_invalids(rdev, vm);
509 }
510 
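/**
 * radeon_cs_ib_vm_chunk() - check and schedule the IB, VM path
 * @rdev:	radeon device.
 * @parser:	parser structure holding parsing context.
 *
 * For command streams that use a VM, run the per-ring ib_parse checks,
 * update the VM page tables, sync with the buffer fences and the VM fence,
 * and schedule the IB (together with the CONST IB on CHIP_TAHITI and newer,
 * when present).
 **/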
511 static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
512 				 struct radeon_cs_parser *parser)
513 {
514 	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
515 	struct radeon_vm *vm = &fpriv->vm;
516 	int r;
517 
518 	if (parser->chunk_ib_idx == -1)
519 		return 0;
520 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
521 		return 0;
522 
523 	if (parser->const_ib.length_dw) {
524 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
525 		if (r) {
526 			return r;
527 		}
528 	}
529 
530 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
531 	if (r) {
532 		return r;
533 	}
534 
535 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
536 		radeon_uvd_note_usage(rdev);
537 
538 	lockmgr(&vm->mutex, LK_EXCLUSIVE);
539 	r = radeon_bo_vm_update_pte(parser, vm);
540 	if (r) {
541 		goto out;
542 	}
543 	radeon_cs_sync_rings(parser);
544 	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
545 
546 	if ((rdev->family >= CHIP_TAHITI) &&
547 	    (parser->chunk_const_ib_idx != -1)) {
548 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
549 	} else {
550 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
551 	}
552 
553 out:
554 	lockmgr(&vm->mutex, LK_RELEASE);
555 	return r;
556 }
557 
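/*
 * -EDEADLK means a GPU lockup was detected while waiting; reset the GPU and
 * return -EAGAIN (if the reset succeeded) so userspace resubmits the stream.
 */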
558 static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
559 {
560 	if (r == -EDEADLK) {
561 		r = radeon_gpu_reset(rdev);
562 		if (!r)
563 			r = -EAGAIN;
564 	}
565 	return r;
566 }
567 
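/**
 * radeon_cs_ib_fill() - allocate the IB(s) and copy in the command stream
 * @rdev:	radeon device.
 * @parser:	parser structure holding parsing context.
 *
 * Allocate the IB (and the CONST IB on CHIP_TAHITI and newer, when present),
 * enforce the VM IB size limit and copy the commands from the chunk kdata or
 * straight from userspace.
 **/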
568 static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
569 {
570 	struct radeon_cs_chunk *ib_chunk;
571 	struct radeon_vm *vm = NULL;
572 	int r;
573 
574 	if (parser->chunk_ib_idx == -1)
575 		return 0;
576 
577 	if (parser->cs_flags & RADEON_CS_USE_VM) {
578 		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
579 		vm = &fpriv->vm;
580 
581 		if ((rdev->family >= CHIP_TAHITI) &&
582 		    (parser->chunk_const_ib_idx != -1)) {
583 			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
584 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
585 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
586 				return -EINVAL;
587 			}
588 			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
589 					   vm, ib_chunk->length_dw * 4);
590 			if (r) {
591 				DRM_ERROR("Failed to get const ib !\n");
592 				return r;
593 			}
594 			parser->const_ib.is_const_ib = true;
595 			parser->const_ib.length_dw = ib_chunk->length_dw;
596 			if (copy_from_user(parser->const_ib.ptr,
597 					       ib_chunk->user_ptr,
598 					       ib_chunk->length_dw * 4))
599 				return -EFAULT;
600 		}
601 
602 		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
603 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
604 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
605 			return -EINVAL;
606 		}
607 	}
608 	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
609 
610 	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
611 			   vm, ib_chunk->length_dw * 4);
612 	if (r) {
613 		DRM_ERROR("Failed to get ib !\n");
614 		return r;
615 	}
616 	parser->ib.length_dw = ib_chunk->length_dw;
617 	if (ib_chunk->kdata)
618 		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
619 	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
620 		return -EFAULT;
621 	return 0;
622 }
623 
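/**
 * radeon_cs_ioctl() - DRM_RADEON_CS ioctl, submit a command stream
 * @dev:	drm device.
 * @data:	ioctl argument (struct drm_radeon_cs).
 * @filp:	drm file this submission comes from.
 *
 * Initialize the parser, copy in the IB(s), look up and validate the buffer
 * list, then hand the stream to the non-VM or VM submission path. Cleans up
 * the parser state in all cases and turns GPU lockups into -EAGAIN.
 **/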
624 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
625 {
626 	struct radeon_device *rdev = dev->dev_private;
627 	struct radeon_cs_parser parser;
628 	int r;
629 
630 	lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE);
631 	if (!rdev->accel_working) {
632 		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
633 		return -EBUSY;
634 	}
635 	if (rdev->in_reset) {
636 		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
637 		r = radeon_gpu_reset(rdev);
638 		if (!r)
639 			r = -EAGAIN;
640 		return r;
641 	}
642 	/* initialize parser */
643 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
644 	parser.filp = filp;
645 	parser.rdev = rdev;
646 	parser.dev = rdev->dev;
647 	parser.family = rdev->family;
648 	r = radeon_cs_parser_init(&parser, data);
649 	if (r) {
650 		DRM_ERROR("Failed to initialize parser !\n");
651 		radeon_cs_parser_fini(&parser, r, false);
652 		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
653 		r = radeon_cs_handle_lockup(rdev, r);
654 		return r;
655 	}
656 
657 	r = radeon_cs_ib_fill(rdev, &parser);
658 	if (!r) {
659 		r = radeon_cs_parser_relocs(&parser);
660 		if (r && r != -ERESTARTSYS)
661 			DRM_ERROR("Failed to parse relocation %d!\n", r);
662 	}
663 
664 	if (r) {
665 		radeon_cs_parser_fini(&parser, r, false);
666 		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
667 		r = radeon_cs_handle_lockup(rdev, r);
668 		return r;
669 	}
670 
671 #ifdef TRACE_TODO
672 	trace_radeon_cs(&parser);
673 #endif
674 
675 	r = radeon_cs_ib_chunk(rdev, &parser);
676 	if (r) {
677 		goto out;
678 	}
679 	r = radeon_cs_ib_vm_chunk(rdev, &parser);
680 	if (r) {
681 		goto out;
682 	}
683 out:
684 	radeon_cs_parser_fini(&parser, r, true);
685 	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
686 	r = radeon_cs_handle_lockup(rdev, r);
687 	return r;
688 }
689 
690 /**
691  * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
692  * @p:		parser structure holding parsing context.
693  * @pkt:	where to store packet information
 * @idx:	index of the first dword of the packet in the ib chunk
694  *
695  * Assume that chunk_ib_index is properly set. Will return -EINVAL
696  * if the packet is bigger than the remaining ib size or its type is unknown.
697  **/
698 int radeon_cs_packet_parse(struct radeon_cs_parser *p,
699 			   struct radeon_cs_packet *pkt,
700 			   unsigned idx)
701 {
702 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
703 	struct radeon_device *rdev = p->rdev;
704 	uint32_t header;
705 
706 	if (idx >= ib_chunk->length_dw) {
707 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
708 			  idx, ib_chunk->length_dw);
709 		return -EINVAL;
710 	}
711 	header = radeon_get_ib_value(p, idx);
712 	pkt->idx = idx;
713 	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
714 	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
715 	pkt->one_reg_wr = 0;
716 	switch (pkt->type) {
717 	case RADEON_PACKET_TYPE0:
718 		if (rdev->family < CHIP_R600) {
719 			pkt->reg = R100_CP_PACKET0_GET_REG(header);
720 			pkt->one_reg_wr =
721 				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
722 		} else
723 			pkt->reg = R600_CP_PACKET0_GET_REG(header);
724 		break;
725 	case RADEON_PACKET_TYPE3:
726 		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
727 		break;
728 	case RADEON_PACKET_TYPE2:
729 		pkt->count = -1;
730 		break;
731 	default:
732 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
733 		return -EINVAL;
734 	}
735 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
736 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
737 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
738 		return -EINVAL;
739 	}
740 	return 0;
741 }
742 
743 /**
744  * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
745  * @p:		structure holding the parser context.
746  *
747  * Check if the next packet is a NOP relocation packet3.
748  **/
749 bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
750 {
751 	struct radeon_cs_packet p3reloc;
752 	int r;
753 
754 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
755 	if (r)
756 		return false;
757 	if (p3reloc.type != RADEON_PACKET_TYPE3)
758 		return false;
759 	if (p3reloc.opcode != RADEON_PACKET3_NOP)
760 		return false;
761 	return true;
762 }
763 
764 /**
765  * radeon_cs_dump_packet() - dump raw packet context
766  * @p:		structure holding the parser context.
767  * @pkt:	structure holding the packet.
768  *
769  * Used mostly for debugging and error reporting.
770  **/
771 void radeon_cs_dump_packet(struct radeon_cs_parser *p,
772 			   struct radeon_cs_packet *pkt)
773 {
774 	volatile uint32_t *ib;
775 	unsigned i;
776 	unsigned idx;
777 
778 	ib = p->ib.ptr;
779 	idx = pkt->idx;
780 	for (i = 0; i <= (pkt->count + 1); i++, idx++)
781 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
782 }
783 
784 /**
785  * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
786  * @p:		parser structure holding parsing context.
787  * @cs_reloc:	where to store the pointer to the resolved relocation
788  * @nomm:	legacy UMS path (no MM); read the GPU offset from the chunk data
791  *
792  * Check if the next packet is a relocation packet3 and return the
793  * corresponding reloc entry in @cs_reloc.
794  **/
795 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
796 				struct radeon_cs_reloc **cs_reloc,
797 				int nomm)
798 {
799 	struct radeon_cs_chunk *relocs_chunk;
800 	struct radeon_cs_packet p3reloc;
801 	unsigned idx;
802 	int r;
803 
804 	if (p->chunk_relocs_idx == -1) {
805 		DRM_ERROR("No relocation chunk !\n");
806 		return -EINVAL;
807 	}
808 	*cs_reloc = NULL;
809 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
810 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
811 	if (r)
812 		return r;
813 	p->idx += p3reloc.count + 2;
814 	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
815 	    p3reloc.opcode != RADEON_PACKET3_NOP) {
816 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
817 			  p3reloc.idx);
818 		radeon_cs_dump_packet(p, &p3reloc);
819 		return -EINVAL;
820 	}
821 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
822 	if (idx >= relocs_chunk->length_dw) {
823 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
824 			  idx, relocs_chunk->length_dw);
825 		radeon_cs_dump_packet(p, &p3reloc);
826 		return -EINVAL;
827 	}
828 	/* FIXME: we assume reloc size is 4 dwords */
829 	if (nomm) {
830 		*cs_reloc = p->relocs;
831 		(*cs_reloc)->gpu_offset =
832 			(u64)relocs_chunk->kdata[idx + 3] << 32;
833 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
834 	} else
835 		*cs_reloc = p->relocs_ptr[(idx / 4)];
836 	return 0;
837 }
838