/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
        struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
        unsigned i;

        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
                INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
                                  struct list_head *item, unsigned priority)
{
        /* Since buffers which appear sooner in the relocation list are
         * likely to be used more often than buffers which appear later
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
        list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
                                       struct list_head *out_list)
{
        unsigned i;

        /* Connect the sorted buckets in the output list. */
        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
                list_splice(&b->bucket[i], out_list);
        }
}

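/**
 * radeon_cs_parser_relocs() - build the sorted list of buffers to validate
 * @p: parser structure holding parsing context.
 *
 * Look up the GEM objects referenced by the relocation chunk, fill in the
 * preferred/allowed domains and the priority of each buffer and collect
 * them into the sorted @p->validated list ready for validation.
 **/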
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
        unsigned i;
        bool need_mmap_lock = false;
        int r;

        if (p->chunk_relocs == NULL) {
                return 0;
        }
        chunk = p->chunk_relocs;
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
        if (p->relocs == NULL) {
                return -ENOMEM;
        }

        radeon_cs_buckets_init(&buckets);

        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
                struct drm_gem_object *gobj;
                unsigned priority;

                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                gobj = drm_gem_object_lookup(p->filp, r->handle);
                if (gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
                p->relocs[i].robj = gem_to_radeon_bo(gobj);

                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
                 * Also, the buffers used for write have a higher priority than
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
                           + !!r->write_domain;

                /* the first reloc of an UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards and older
                   IGP chips to avoid image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
                     p->rdev->family == CHIP_RS780 ||
                     p->rdev->family == CHIP_RS880)) {

                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].prefered_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        p->relocs[i].allowed_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        /* prioritize this over any other relocation */
                        priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;

                        if (domain & RADEON_GEM_DOMAIN_CPU) {
                                DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
                                          "for command submission\n");
                                return -EINVAL;
                        }

                        p->relocs[i].prefered_domains = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].allowed_domains = domain;
                }

#if 0
                if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
                        uint32_t domain = p->relocs[i].prefered_domains;
                        if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
                                DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
                                          "allowed for userptr BOs\n");
                                return -EINVAL;
                        }
                        need_mmap_lock = true;
                        domain = RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].prefered_domains = domain;
                        p->relocs[i].allowed_domains = domain;
                }
#endif

                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].tv.shared = !r->write_domain;

                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
        }

        radeon_cs_buckets_get_list(&buckets, &p->validated);

        if (p->cs_flags & RADEON_CS_USE_VM)
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);
        if (need_mmap_lock)
                down_read(&current->mm->mmap_sem);

        r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

        if (need_mmap_lock)
                up_read(&current->mm->mmap_sem);

        return r;
}

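/**
 * radeon_cs_get_ring() - map the userspace ring id and priority to a HW ring
 * @p: parser structure holding parsing context.
 * @ring: ring id requested by userspace (RADEON_CS_RING_*)
 * @priority: requested submission priority
 *
 * Translate the ring id from the CS flags chunk into a hardware ring index,
 * taking the ASIC family and the requested priority into account, and store
 * the result in @p->ring.
 **/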
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
        p->priority = priority;

        switch (ring) {
        default:
                DRM_ERROR("unknown ring id: %d\n", ring);
                return -EINVAL;
        case RADEON_CS_RING_GFX:
                p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_COMPUTE:
                if (p->rdev->family >= CHIP_TAHITI) {
                        if (p->priority > 0)
                                p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_DMA:
                if (p->rdev->family >= CHIP_CAYMAN) {
                        if (p->priority > 0)
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
                } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
                }
                break;
        case RADEON_CS_RING_UVD:
                p->ring = R600_RING_TYPE_UVD_INDEX;
                break;
        case RADEON_CS_RING_VCE:
                /* TODO: only use the low priority ring for now */
                p->ring = TN_RING_TYPE_VCE1_INDEX;
                break;
        }
        return 0;
}

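/**
 * radeon_cs_sync_rings() - synchronize with the fences of all validated BOs
 * @p: parser structure holding parsing context.
 *
 * Walk the validated buffer list and make the command submission wait on the
 * fences stored in each buffer's reservation object.
 **/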
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
        struct radeon_bo_list *reloc;
        int r;

        list_for_each_entry(reloc, &p->validated, tv.head) {
                struct reservation_object *resv;

                resv = reloc->robj->tbo.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
                                     reloc->tv.shared);
                if (r)
                        return r;
        }
        return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i;
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;

        INIT_LIST_HEAD(&p->validated);

        if (!cs->num_chunks) {
                return 0;
        }

        /* get chunks */
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->const_ib.sa_bo = NULL;
        p->chunk_ib = NULL;
        p->chunk_relocs = NULL;
        p->chunk_flags = NULL;
        p->chunk_const_ib = NULL;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (copy_from_user(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs = &p->chunks[i];
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib = &p->chunks[i];
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
                        p->chunk_const_ib = &p->chunks[i];
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->chunk_flags = &p->chunks[i];
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;

                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }

                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
                        if (p->chunks[i].length_dw > 2)
                                priority = (s32)p->chunks[i].kdata[2];
                }
        }

        /* these are KMS only */
        if (p->rdev) {
                if ((p->cs_flags & RADEON_CS_USE_VM) &&
                    !p->rdev->vm_manager.enabled) {
                        DRM_ERROR("VM not active on asic!\n");
                        return -EINVAL;
                }

                if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;

                /* we only support VM on some SI+ rings */
                if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
                        if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
                                DRM_ERROR("Ring %d requires VM!\n", p->ring);
                                return -EINVAL;
                        }
                } else {
                        if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
                                DRM_ERROR("VM not supported on ring %d!\n",
                                          p->ring);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
{
        struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
        struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether to back off the TTM reservation on error
 *
 * If error is set, then unvalidate the buffers, otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
        unsigned i;

        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);

                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            &parser->ib.fence->base);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        struct radeon_bo *bo = parser->relocs[i].robj;
                        if (bo == NULL)
                                continue;

                        drm_gem_object_unreference_unlocked(&bo->gem_base);
                }
        }
        kfree(parser->track);
        drm_free_large(parser->relocs);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}

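/**
 * radeon_cs_ib_chunk() - check and submit an IB in physical (non-VM) mode
 * @rdev: radeon device the CS was submitted to
 * @parser: parser structure holding parsing context.
 *
 * Run the per-ring command stream checker on the IB, synchronize with the
 * fences of the validated buffers and schedule the IB for execution. Only
 * used when the submission does not use a VM.
 **/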
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                              struct radeon_cs_parser *parser)
{
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM)
                return 0;

        r = radeon_cs_parse(rdev, parser->ring, parser);
        if (r || parser->parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                return r;
        }

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
        else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
                 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
                radeon_vce_note_usage(rdev);

        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        return r;
}

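/**
 * radeon_bo_vm_update_pte() - update the page tables for all BOs of a CS
 * @p: parser structure holding parsing context.
 * @vm: the VM the command submission is executed in
 *
 * Update the page directory, the mapping of the temporary IB BO and the page
 * table entries of every relocated buffer, and make the IB wait for the
 * resulting page table updates.
 **/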
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
{
        struct radeon_device *rdev = p->rdev;
        struct radeon_bo_va *bo_va;
        int i, r;

        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;

        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;

        if (vm->ib_bo_va == NULL) {
                DRM_ERROR("Tmp BO not in VM!\n");
                return -EINVAL;
        }

        r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;

        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;

                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;

                radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
        }

        return radeon_vm_clear_invalids(rdev, vm);
}

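/**
 * radeon_cs_ib_vm_chunk() - check and submit an IB in VM mode
 * @rdev: radeon device the CS was submitted to
 * @parser: parser structure holding parsing context.
 *
 * Run the per-ring IB checker on the (const) IB, update the VM page tables,
 * synchronize with the fences of the validated buffers and schedule the IB
 * (together with the const IB on SI and newer) for execution.
 **/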
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                                 struct radeon_cs_parser *parser)
{
        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;

        if (parser->const_ib.length_dw) {
                r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
                if (r) {
                        return r;
                }
        }

        r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
        if (r) {
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);

        mutex_lock(&vm->mutex);
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
        }

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                goto out;
        }

        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib != NULL)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }

out:
        mutex_unlock(&vm->mutex);
        return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_vm *vm = NULL;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM) {
                struct radeon_fpriv *fpriv = parser->filp->driver_priv;
                vm = &fpriv->vm;

                if ((rdev->family >= CHIP_TAHITI) &&
                    (parser->chunk_const_ib != NULL)) {
                        ib_chunk = parser->chunk_const_ib;
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
                        }
                        r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
                                          vm, ib_chunk->length_dw * 4);
                        if (r) {
                                DRM_ERROR("Failed to get const ib !\n");
                                return r;
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
                        if (copy_from_user(parser->const_ib.ptr,
                                           ib_chunk->user_ptr,
                                           ib_chunk->length_dw * 4))
                                return -EFAULT;
                }

                ib_chunk = parser->chunk_ib;
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
        ib_chunk = parser->chunk_ib;

        r = radeon_ib_get(rdev, parser->ring, &parser->ib,
                          vm, ib_chunk->length_dw * 4);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                return r;
        }
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
        else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;
        return 0;
}

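/**
 * radeon_cs_ioctl() - entry point for the CS ioctl
 * @dev: drm device
 * @data: ioctl argument (struct drm_radeon_cs)
 * @filp: drm file the submission came from
 *
 * Initialize the parser, copy in the chunks, validate the buffers and hand
 * the IB off to the selected ring, handling GPU lockups by resetting the GPU
 * and asking userspace to resubmit.
 **/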
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        int r;

        down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
                up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        if (rdev->in_reset) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                return r;
        }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        r = radeon_cs_ib_fill(rdev, &parser);
        if (!r) {
                r = radeon_cs_parser_relocs(&parser);
                if (r && r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
        }

        if (r) {
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

#ifdef TRACE_TODO
        trace_radeon_cs(&parser);
#endif

        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
        r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
out:
        radeon_cs_parser_fini(&parser, r, true);
        up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index of the packet header dword in the IB
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining IB size or if the packet type
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
        int ret = 0, i;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
        pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case RADEON_PACKET_TYPE0:
                if (rdev->family < CHIP_R600) {
                        pkt->reg = R100_CP_PACKET0_GET_REG(header);
                        pkt->one_reg_wr =
                                RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
                } else
                        pkt->reg = R600_CP_PACKET0_GET_REG(header);
                break;
        case RADEON_PACKET_TYPE3:
                pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
                break;
        case RADEON_PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                ret = -EINVAL;
                goto dump_ib;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                ret = -EINVAL;
                goto dump_ib;
        }
        return 0;

dump_ib:
        for (i = 0; i < ib_chunk->length_dw; i++) {
                if (i == idx)
                        printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
                else
                        printk("\t0x%08x\n", radeon_get_ib_value(p, i));
        }
        return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return false;
        if (p3reloc.type != RADEON_PACKET_TYPE3)
                return false;
        if (p3reloc.opcode != RADEON_PACKET3_NOP)
                return false;
        return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt)
{
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib.ptr;
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++)
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the found reloc entry
 * @nomm: when set, take the GPU offset directly from the relocation data
 *        instead of the validated BO
 *
 * Check if the next packet is a relocation packet3 and fetch the matching
 * entry from the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                struct radeon_bo_list **cs_reloc,
                                int nomm)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = p->chunk_relocs;
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != RADEON_PACKET_TYPE3 ||
            p3reloc.opcode != RADEON_PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
                (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = &p->relocs[(idx / 4)];
        return 0;
}