1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <drm/drmP.h> 29 #include "radeon.h" 30 #include <uapi_drm/radeon_drm.h> 31 #include "radeon_asic.h" 32 #include "radeon_kms.h" 33 34 #include <linux/slab.h> 35 #ifdef PM_TODO 36 #include <linux/pm_runtime.h> 37 #endif 38 39 #ifdef PM_TODO 40 #if defined(CONFIG_VGA_SWITCHEROO) 41 bool radeon_has_atpx(void); 42 #else 43 static inline bool radeon_has_atpx(void) { return false; } 44 #endif 45 #endif 46 47 /** 48 * radeon_driver_unload_kms - Main unload function for KMS. 49 * 50 * @dev: drm dev pointer 51 * 52 * This is the main unload function for KMS (all asics). 
53 * It calls radeon_modeset_fini() to tear down the 54 * displays, and radeon_device_fini() to tear down 55 * the rest of the device (CP, writeback, etc.). 56 * Returns 0 on success. 57 */ 58 int radeon_driver_unload_kms(struct drm_device *dev) 59 { 60 struct radeon_device *rdev = dev->dev_private; 61 62 /* XXX pending drm update */ 63 drm_fini_pdev(&dev->pdev); 64 65 if (rdev == NULL) 66 return 0; 67 68 if (rdev->rmmio == NULL) 69 goto done_free; 70 71 #ifdef PM_TODO 72 pm_runtime_get_sync(dev->dev); 73 #endif 74 75 radeon_acpi_fini(rdev); 76 radeon_modeset_fini(rdev); 77 radeon_device_fini(rdev); 78 79 done_free: 80 kfree(rdev); 81 dev->dev_private = NULL; 82 return 0; 83 } 84 85 /** 86 * radeon_driver_load_kms - Main load function for KMS. 87 * 88 * @dev: drm dev pointer 89 * @flags: device flags 90 * 91 * This is the main load function for KMS (all asics). 92 * It calls radeon_device_init() to set up the non-display 93 * parts of the chip (asic init, CP, writeback, etc.), and 94 * radeon_modeset_init() to set up the display parts 95 * (crtcs, encoders, hotplug detect, etc.). 96 * Returns 0 on success, error on failure. 
97 */ 98 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) 99 { 100 struct radeon_device *rdev; 101 int r, acpi_status; 102 103 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); 104 if (rdev == NULL) { 105 return -ENOMEM; 106 } 107 dev->dev_private = (void *)rdev; 108 109 /* XXX pending drm update */ 110 drm_init_pdev(dev->dev, &dev->pdev); 111 112 /* update BUS flag */ 113 if (drm_device_is_agp(dev)) { 114 DRM_INFO("RADEON_IS_AGP\n"); 115 flags |= RADEON_IS_AGP; 116 } else if (drm_device_is_pcie(dev)) { 117 DRM_INFO("RADEON_IS_PCIE\n"); 118 flags |= RADEON_IS_PCIE; 119 } else { 120 DRM_INFO("RADEON_IS_PCI\n"); 121 flags |= RADEON_IS_PCI; 122 } 123 124 #ifdef PM_TODO 125 if ((radeon_runtime_pm != 0) && 126 radeon_has_atpx() && 127 ((flags & RADEON_IS_IGP) == 0)) 128 #endif 129 130 /* radeon_device_init should report only fatal error 131 * like memory allocation failure or iomapping failure, 132 * or memory manager initialization failure, it must 133 * properly initialize the GPU MC controller and permit 134 * VRAM allocation 135 */ 136 r = radeon_device_init(rdev, dev, dev->pdev, flags); 137 if (r) { 138 dev_err(dev->pdev->dev, "Fatal error during GPU init\n"); 139 goto out; 140 } 141 142 /* Again modeset_init should fail only on fatal error 143 * otherwise it should provide enough functionalities 144 * for shadowfb to run 145 */ 146 r = radeon_modeset_init(rdev); 147 if (r) 148 dev_err(dev->pdev->dev, "Fatal error during modeset init\n"); 149 150 /* Call ACPI methods: require modeset init 151 * but failure is not fatal 152 */ 153 if (!r) { 154 acpi_status = radeon_acpi_init(rdev); 155 if (acpi_status) 156 dev_dbg(dev->pdev->dev, 157 "Error during ACPI methods call\n"); 158 } 159 160 #ifdef PM_TODO 161 if (radeon_is_px(dev)) { 162 pm_runtime_use_autosuspend(dev->dev); 163 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 164 pm_runtime_set_active(dev->dev); 165 pm_runtime_allow(dev->dev); 166 pm_runtime_mark_last_busy(dev->dev); 167 
pm_runtime_put_autosuspend(dev->dev); 168 } 169 #endif 170 171 out: 172 if (r) 173 radeon_driver_unload_kms(dev); 174 return r; 175 } 176 177 /** 178 * radeon_set_filp_rights - Set filp right. 179 * 180 * @dev: drm dev pointer 181 * @owner: drm file 182 * @applier: drm file 183 * @value: value 184 * 185 * Sets the filp rights for the device (all asics). 186 */ 187 static void radeon_set_filp_rights(struct drm_device *dev, 188 struct drm_file **owner, 189 struct drm_file *applier, 190 uint32_t *value) 191 { 192 DRM_LOCK(dev); 193 if (*value == 1) { 194 /* wants rights */ 195 if (!*owner) 196 *owner = applier; 197 } else if (*value == 0) { 198 /* revokes rights */ 199 if (*owner == applier) 200 *owner = NULL; 201 } 202 *value = *owner == applier ? 1 : 0; 203 DRM_UNLOCK(dev); 204 } 205 206 /* 207 * Userspace get information ioctl 208 */ 209 /** 210 * radeon_info_ioctl - answer a device specific request. 211 * 212 * @rdev: radeon device pointer 213 * @data: request object 214 * @filp: drm filp 215 * 216 * This function is used to pass device specific parameters to the userspace 217 * drivers. Examples include: pci device id, pipeline parms, tiling params, 218 * etc. (all asics). 219 * Returns 0 on success, -EINVAL on failure. 
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	/* Answer scheme: each case fills the buffer that 'value' points at
	 * (default: the 32-bit 'value_tmp') and sets 'value_size'; the
	 * common copy_to_user at the end ships it to 'value_ptr'.  Cases
	 * needing 64-bit or array answers repoint 'value'/'value_size'.
	 */
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	/* info->value is a userspace pointer carried in a 64-bit field */
	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		/* in: drm crtc object id; out: radeon hw crtc index */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		/* Hawaii reports 2 so userspace can distinguish its
		 * (separately gated) acceleration support.
		 */
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working)
				*value = 2;
			else
				*value = 0;
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		/* per-generation config blocks live in a union; pick by family */
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z.
		 */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		/* 64-bit answer: repoint value at value64 */
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		/* in: ring type; out: whether that ring is up */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			/* report ready if either DMA engine is up */
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		/* array answer: repoint value at the per-asic table */
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
return -EINVAL; 496 } 497 break; 498 case RADEON_INFO_SI_CP_DMA_COMPUTE: 499 *value = 1; 500 break; 501 case RADEON_INFO_SI_BACKEND_ENABLED_MASK: 502 if (rdev->family >= CHIP_BONAIRE) { 503 *value = rdev->config.cik.backend_enable_mask; 504 } else if (rdev->family >= CHIP_TAHITI) { 505 *value = rdev->config.si.backend_enable_mask; 506 } else { 507 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); 508 } 509 break; 510 case RADEON_INFO_MAX_SCLK: 511 if ((rdev->pm.pm_method == PM_METHOD_DPM) && 512 rdev->pm.dpm_enabled) 513 *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; 514 else 515 *value = rdev->pm.default_sclk * 10; 516 break; 517 case RADEON_INFO_VCE_FW_VERSION: 518 *value = rdev->vce.fw_version; 519 break; 520 case RADEON_INFO_VCE_FB_VERSION: 521 *value = rdev->vce.fb_version; 522 break; 523 case RADEON_INFO_NUM_BYTES_MOVED: 524 value = (uint32_t*)&value64; 525 value_size = sizeof(uint64_t); 526 value64 = atomic64_read(&rdev->num_bytes_moved); 527 break; 528 case RADEON_INFO_VRAM_USAGE: 529 value = (uint32_t*)&value64; 530 value_size = sizeof(uint64_t); 531 value64 = atomic64_read(&rdev->vram_usage); 532 break; 533 case RADEON_INFO_GTT_USAGE: 534 value = (uint32_t*)&value64; 535 value_size = sizeof(uint64_t); 536 value64 = atomic64_read(&rdev->gtt_usage); 537 break; 538 case RADEON_INFO_ACTIVE_CU_COUNT: 539 if (rdev->family >= CHIP_BONAIRE) 540 *value = rdev->config.cik.active_cus; 541 else if (rdev->family >= CHIP_TAHITI) 542 *value = rdev->config.si.active_cus; 543 else if (rdev->family >= CHIP_CAYMAN) 544 *value = rdev->config.cayman.active_simds; 545 else if (rdev->family >= CHIP_CEDAR) 546 *value = rdev->config.evergreen.active_simds; 547 else if (rdev->family >= CHIP_RV770) 548 *value = rdev->config.rv770.active_simds; 549 else if (rdev->family >= CHIP_R600) 550 *value = rdev->config.r600.active_simds; 551 else 552 *value = 1; 553 break; 554 default: 555 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 556 return -EINVAL; 557 } 
	/* Common exit: ship whatever buffer 'value' points at back to
	 * userspace; 'value_size' was set by the individual cases above.
	 */
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
#ifdef DUMBBELL_WIP
	vga_switcheroo_process_delayed_switch();
#endif /* DUMBBELL_WIP */
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

#ifdef PM_TODO
	/* NOTE(review): 'r' is not declared in this scope, so this block
	 * will not compile if PM_TODO is ever defined — a local declaration
	 * is needed when runtime PM gets ported.
	 */
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;
#endif

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_vm *vm;
		int r;

		/* per-file private data owning the VM; freed in postclose */
		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		vm = &fpriv->vm;
		r = radeon_vm_init(rdev, vm);
		if (r) {
			kfree(fpriv);
			return r;
		}

		if (rdev->accel_working) {
			/* reserve the ring BO before mapping it into the VM */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				return r;
			}

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);

			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			if (r) {
				/* unwind: drop the half-initialized VM */
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				return r;
			}
		}
		file_priv->driver_priv = fpriv;
	}

#ifdef PM_TODO
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
#endif
	return 0;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* best effort: if the reserve fails we skip removing
			 * the ib mapping; radeon_vm_fini() below still tears
			 * the VM down */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
		}

		radeon_vm_fini(rdev, vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	/* drop any exclusive rights this file held */
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	/* release this file's UVD/VCE session handles */
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);
}

/*
 * VBlank related functions.
713 */ 714 /** 715 * radeon_get_vblank_counter_kms - get frame count 716 * 717 * @dev: drm dev pointer 718 * @crtc: crtc to get the frame count from 719 * 720 * Gets the frame count on the requested crtc (all asics). 721 * Returns frame count on success, -EINVAL on failure. 722 */ 723 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) 724 { 725 struct radeon_device *rdev = dev->dev_private; 726 727 if (crtc < 0 || crtc >= rdev->num_crtc) { 728 DRM_ERROR("Invalid crtc %d\n", crtc); 729 return -EINVAL; 730 } 731 732 return radeon_get_vblank_counter(rdev, crtc); 733 } 734 735 /** 736 * radeon_enable_vblank_kms - enable vblank interrupt 737 * 738 * @dev: drm dev pointer 739 * @crtc: crtc to enable vblank interrupt for 740 * 741 * Enable the interrupt on the requested crtc (all asics). 742 * Returns 0 on success, -EINVAL on failure. 743 */ 744 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) 745 { 746 struct radeon_device *rdev = dev->dev_private; 747 int r; 748 749 if (crtc < 0 || crtc >= rdev->num_crtc) { 750 DRM_ERROR("Invalid crtc %d\n", crtc); 751 return -EINVAL; 752 } 753 754 lockmgr(&rdev->irq.lock, LK_EXCLUSIVE); 755 rdev->irq.crtc_vblank_int[crtc] = true; 756 r = radeon_irq_set(rdev); 757 lockmgr(&rdev->irq.lock, LK_RELEASE); 758 return r; 759 } 760 761 /** 762 * radeon_disable_vblank_kms - disable vblank interrupt 763 * 764 * @dev: drm dev pointer 765 * @crtc: crtc to disable vblank interrupt for 766 * 767 * Disable the interrupt on the requested crtc (all asics). 
768 */ 769 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) 770 { 771 struct radeon_device *rdev = dev->dev_private; 772 773 if (crtc < 0 || crtc >= rdev->num_crtc) { 774 DRM_ERROR("Invalid crtc %d\n", crtc); 775 return; 776 } 777 778 lockmgr(&rdev->irq.lock, LK_EXCLUSIVE); 779 rdev->irq.crtc_vblank_int[crtc] = false; 780 radeon_irq_set(rdev); 781 lockmgr(&rdev->irq.lock, LK_RELEASE); 782 } 783 784 /** 785 * radeon_get_vblank_timestamp_kms - get vblank timestamp 786 * 787 * @dev: drm dev pointer 788 * @crtc: crtc to get the timestamp for 789 * @max_error: max error 790 * @vblank_time: time value 791 * @flags: flags passed to the driver 792 * 793 * Gets the timestamp on the requested crtc based on the 794 * scanout position. (all asics). 795 * Returns postive status flags on success, negative error on failure. 796 */ 797 int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 798 int *max_error, 799 struct timeval *vblank_time, 800 unsigned flags) 801 { 802 struct drm_crtc *drmcrtc; 803 struct radeon_device *rdev = dev->dev_private; 804 805 if (crtc < 0 || crtc >= dev->num_crtcs) { 806 DRM_ERROR("Invalid crtc %d\n", crtc); 807 return -EINVAL; 808 } 809 810 /* Get associated drm_crtc: */ 811 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 812 813 /* Helper routine in DRM core does all the work: */ 814 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 815 vblank_time, flags, 816 drmcrtc, &drmcrtc->hwmode); 817 } 818 819 #define KMS_INVALID_IOCTL(name) \ 820 static int name(struct drm_device *dev, void *data, struct drm_file \ 821 *file_priv) \ 822 { \ 823 DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ 824 return -EINVAL; \ 825 } 826 827 /* 828 * All these ioctls are invalid in kms world. 
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


/* Ioctl table: legacy UMS entries all route to the reject stubs above;
 * only the KMS/GEM entries at the end are functional.
 */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
/* Number of entries in the table above, exported for the drm driver struct. */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);