/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

#include <drm/drm_drv.h>

#include "vga.h"

#if NVGA > 0
#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

extern int vga_console_attached;
#endif

#ifdef __amd64__
#include "efifb.h"
#include <machine/biosvar.h>
#endif

#if NEFIFB > 0
#include <machine/efifbvar.h>
#endif

int	amdgpu_probe(struct device *, void *, void *);
void	amdgpu_attach(struct device *, struct device *, void *);
int	amdgpu_detach(struct device *, int);
int	amdgpu_activate(struct device *, int);
void	amdgpu_attachhook(struct device *);
int	amdgpu_forcedetach(struct amdgpu_device *);

bool	amdgpu_msi_ok(struct amdgpu_device *);

extern const struct pci_device_id amdgpu_pciidlist[];
extern struct drm_driver amdgpu_kms_driver;
extern int amdgpu_exp_hw_support;

/*
 * set if the mountroot hook has a fatal error
 * such as not being able to find the firmware
 */
int amdgpu_fatal_error;

struct cfattach amdgpu_ca = {
	sizeof (struct amdgpu_device), amdgpu_probe, amdgpu_attach,
	amdgpu_detach, amdgpu_activate
};

struct cfdriver amdgpu_cd = {
	NULL, "amdgpu", DV_DULL
};
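
/*
 * OpenBSD autoconf(9) glue: the amdgpu_ca/amdgpu_cd pair above registers
 * the driver with the kernel, roughly playing the role that pci_driver
 * registration plays on Linux.  amdgpu_probe() is the match function,
 * amdgpu_attach() the attach function, and amdgpu_activate() handles
 * suspend/resume events.
 */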

#ifdef __linux__
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
		DRM_WARN("smart shift update failed\n");

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);
}
#endif /* __linux__ */

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

static void amdgpu_get_audio_func(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_get_sync(&p->dev);

		pm_runtime_mark_last_busy(&p->dev);
		pm_runtime_put_autosuspend(&p->dev);

		pci_dev_put(p);
	}
#endif
}

#ifdef __linux__
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	struct pci_dev *parent;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
		flags |= AMD_IS_PX;

	parent = pci_upstream_bridge(adev->pdev);
	adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
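
	/*
	 * Note: AMD_IS_PX marks ATPX-based hybrid graphics (PowerXpress),
	 * while has_pr3 records whether the upstream bridge implements the
	 * ACPI _PR3 power resource; both feed the runtime-pm mode selection
	 * (ATPX vs. BOCO vs. BACO) further down.
	 */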

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_px(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
		adev->runpm = true;
		dev_info(adev->dev, "Using ATPX for runtime pm\n");
	} else if (amdgpu_device_supports_boco(dev) &&
		   (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
		dev_info(adev->dev, "Using BOCO for runtime pm\n");
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on CI+ */
			adev->runpm = true;
			break;
		}
		/* XXX: disable runtime pm if we are the primary adapter
		 * to avoid displays being re-enabled after DPMS.
		 * This needs to be sorted out and fixed properly.
		 */
		if (adev->is_fw_fb)
			adev->runpm = false;
		if (adev->runpm)
			dev_info(adev->dev, "Using BACO for runtime pm\n");
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");

	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_px(dev))
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		/* we want direct complete for BOCO */
		if (amdgpu_device_supports_boco(dev))
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
						DPM_FLAG_SMART_SUSPEND |
						DPM_FLAG_MAY_SKIP_RESUME);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);

		pm_runtime_allow(dev->dev);

		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);

		/*
		 * For runpm implemented via BACO, PMFW will handle the
		 * timing for BACO in and out:
		 *   - put ASIC into BACO state only when both video and
		 *     audio functions are in D3 state.
		 *   - pull ASIC out of BACO state when either video or
		 *     audio function is in D0 state.
		 * Also, at startup, PMFW assumes both functions are in
		 * D0 state.
		 *
		 * So if snd driver was loaded prior to amdgpu driver
		 * and audio function was put into D3 state, there will
		 * be no PMFW-aware D-state transition(D0->D3) on runpm
		 * suspend. Thus the BACO will be not correctly kicked in.
		 *
		 * Via amdgpu_get_audio_func(), the audio dev is put
		 * into D0 state. Then there will be a PMFW-aware D-state
		 * transition(D0->D3) on runpm suspend.
		 */
		if (amdgpu_device_supports_baco(dev) &&
		    !(adev->flags & AMD_IS_APU) &&
		    (adev->asic_type >= CHIP_NAVI10))
			amdgpu_get_audio_func(adev);
	}

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
		DRM_WARN("smart shift update failed\n");

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
#endif /* __linux__ */

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case TA_FW_TYPE_PSP_XGMI:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.xgmi.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAS:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ras.feature_version;
			break;
		case TA_FW_TYPE_PSP_HDCP:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.hdcp.feature_version;
			break;
		case TA_FW_TYPE_PSP_DTM:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.dtm.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAP:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.rap.feature_version;
			break;
		case TA_FW_TYPE_PSP_SECUREDISPLAY:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.securedisplay.feature_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos.fw_version;
		fw_info->feature = adev->psp.sos.feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd.fw_version;
		fw_info->feature = adev->psp.asd.feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TOC:
		fw_info->ver = adev->psp.toc.fw_version;
		fw_info->feature = adev->psp.toc.feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
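
/*
 * Sketch of the userspace side (illustrative only, assuming libdrm): the
 * structure filled in by amdgpu_hw_ip_info() is fetched through the
 * AMDGPU_INFO ioctl, e.g.
 *
 *	struct drm_amdgpu_info_hw_ip ip = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&ip;
 *	request.return_size = sizeof(ip);
 *	request.query = AMDGPU_INFO_HW_IP_INFO;
 *	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
 *	request.query_hw_ip.ip_instance = 0;
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 *
 * available_rings is a bitmask with one set bit per ready ring, so
 * userspace derives the usable ring count from a popcount of it.
 */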

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
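	/*
	 * The memory queries below report byte sizes.  "usable" heap sizes
	 * subtract pinned buffers and the VRAM slice reserved for page
	 * tables (AMDGPU_VM_RESERVED_VRAM), and max_allocation is capped
	 * at 3/4 of the usable size as a headroom heuristic.
	 */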
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *vram_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
		struct ttm_resource_manager *gtt_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(vram_man);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
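	/*
	 * Note on the register reads above: GFXOFF is disallowed around
	 * amdgpu_asic_read_register() because MMIO access to a powered
	 * down GFX block can hang the chip; the amdgpu_gfx_off_ctrl()
	 * pair re-allows it once the reads are done.
	 */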
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device *dev_info;
		uint64_t vm_size;
		int ret;

		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
		if (!dev_info)
			return -ENOMEM;

		dev_info->device_id = adev->pdev->device;
		dev_info->chip_rev = adev->rev_id;
		dev_info->external_rev = adev->external_rev_id;
		dev_info->pci_rev = adev->pdev->revision;
		dev_info->family = adev->family;
		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info->max_engine_clock = adev->clock.default_sclk * 10;
			dev_info->max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info->_pad = 0;
		dev_info->ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info->virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->cu_active_number = adev->gfx.cu_info.number;
		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info->vram_type = adev->gmc.vram_type;
		dev_info->vram_bit_width = adev->gmc.vram_width;
		dev_info->vce_harvest_config = adev->vce.harvest_config;
		dev_info->gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info->pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		ret = copy_to_user(out, dev_info,
				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
		kfree(dev_info);
		return ret;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		case AMDGPU_INFO_VBIOS_INFO: {
			struct drm_amdgpu_info_vbios vbios_info = {};
			struct atom_context *atom_context;

			atom_context = adev->mode_info.atom_context;
			memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
			vbios_info.version = atom_context->version;
			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
			       sizeof(atom_context->vbios_ver_str));
			memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));

			return copy_to_user(out, &vbios_info,
					    min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	case AMDGPU_INFO_VIDEO_CAPS: {
		const struct amdgpu_video_codecs *codecs;
		struct drm_amdgpu_info_video_caps *caps;
		int r;

		switch (info->video_cap.type) {
		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
			if (r)
				return -EINVAL;
			break;
		case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
			r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
			if (r)
				return -EINVAL;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->video_cap.type);
			return -EINVAL;
		}

		caps = kzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return -ENOMEM;

		for (i = 0; i < codecs->codec_count; i++) {
			int idx = codecs->codec_array[i].codec_type;

			switch (idx) {
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
				caps->codec_info[idx].valid = 1;
				caps->codec_info[idx].max_width =
					codecs->codec_array[i].max_width;
				caps->codec_info[idx].max_height =
					codecs->codec_array[i].max_height;
				caps->codec_info[idx].max_pixels_per_frame =
					codecs->codec_array[i].max_pixels_per_frame;
				caps->codec_info[idx].max_level =
					codecs->codec_array[i].max_level;
				break;
			default:
				break;
			}
		}
		r = copy_to_user(out, caps,
				 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
		kfree(caps);
		return r;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
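
/*
 * Illustrative userspace note (assuming libdrm_amdgpu): the sensor
 * queries above are wrapped as amdgpu_query_sensor_info(), e.g. reading
 * the current GPU temperature (millidegrees C, per the kernel side):
 *
 *	uint32_t temp = 0;
 *	amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GPU_TEMP,
 *	    sizeof(temp), &temp);
 *
 * The ioctl copies back at most return_size bytes, so callers must size
 * the output buffer to match the struct they request.
 */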

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
	if (r)
		goto error_vm;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	rw_init(&fpriv->bo_list_lock, "agbo");
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid) {
		amdgpu_pasid_free(pasid);
		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
	}

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
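
/*
 * Note: each DRM file descriptor gets its own amdgpu_fpriv carrying a
 * private GPU VM (and, when available, a PASID identifying that VM to
 * the fault-handling paths); amdgpu_driver_postclose_kms() below is the
 * mirror image that tears all of this down again.
 */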
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}


void amdgpu_driver_release_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_device_fini_sw(adev);
	pci_set_drvdata(adev->pdev, NULL);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
		TA_FW_NAME(XGMI),
		TA_FW_NAME(RAS),
		TA_FW_NAME(HDCP),
		TA_FW_NAME(DTM),
		TA_FW_NAME(RAP),
		TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
	};

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);


	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;

		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* TOC */
	query_fw.fw_type = AMDGPU_INFO_FW_TOC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);

#endif

void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_firmware_info", 0444, root,
			    adev, &amdgpu_debugfs_firmware_info_fops);

#endif
}
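
/*
 * Note on the match return values below: autoconf(9) attaches the driver
 * whose match function returns the highest nonzero value.  Returning 20
 * lets amdgpu outbid the generic vga(4)/efifb match for a supported
 * device, while 0 declines it (a previous fatal firmware error,
 * experimental hardware, or an unknown device).
 */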
int
amdgpu_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pci_device_id *id_entry;
	unsigned long flags = 0;

	if (amdgpu_fatal_error)
		return 0;

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), amdgpu_pciidlist);
	if (id_entry != NULL) {
		flags = id_entry->driver_data;
		if (flags & AMD_EXP_HW_SUPPORT)
			return 0;
		else
			return 20;
	}

	return 0;
}

/*
 * Some functions are only called once on init regardless of how many times
 * amdgpu attaches.  On linux this is handled via module_init()/module_exit().
 */
int amdgpu_refcnt;

int __init drm_sched_fence_slab_init(void);
void __exit drm_sched_fence_slab_fini(void);
irqreturn_t amdgpu_irq_handler(void *);

void
amdgpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)self;
	struct drm_device *dev;
	struct pci_attach_args *pa = aux;
	const struct pci_device_id *id_entry;
	pcireg_t type;
	int i;
	uint8_t rmmio_bar;
	paddr_t fb_aper;
	pcireg_t addr, mask;
	int s;
	bool supports_atomic = false;

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), amdgpu_pciidlist);
	adev->flags = id_entry->driver_data;
	adev->family = adev->flags & AMD_ASIC_MASK;
	adev->pc = pa->pa_pc;
	adev->pa_tag = pa->pa_tag;
	adev->iot = pa->pa_iot;
	adev->memt = pa->pa_memt;
	adev->dmat = pa->pa_dmat;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
	    == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
		adev->primary = 1;
#if NVGA > 0
		adev->console = vga_is_console(pa->pa_iot, -1);
		vga_console_attached = 1;
#endif
	}
#if NEFIFB > 0
	if (efifb_is_primary(pa)) {
		adev->primary = 1;
		adev->console = efifb_is_console(pa);
		efifb_detach();
	}
#endif

#define AMDGPU_PCI_MEM	0x10

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AMDGPU_PCI_MEM);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_info(pa->pa_pc, pa->pa_tag, AMDGPU_PCI_MEM,
	    type, &adev->fb_aper_offset, &adev->fb_aper_size, NULL)) {
		printf(": can't get framebuffer info\n");
		return;
	}

	if (adev->fb_aper_offset == 0) {
		bus_size_t start, end, pci_mem_end;
		bus_addr_t base;

		KASSERT(pa->pa_memex != NULL);

		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
		if (PCI_MAPREG_MEM_TYPE(type) == PCI_MAPREG_MEM_TYPE_64BIT)
			pci_mem_end = PCI_MEM64_END;
		else
			pci_mem_end = PCI_MEM_END;
		end = min(pci_mem_end, pa->pa_memex->ex_end);
		if (extent_alloc_subregion(pa->pa_memex, start, end,
		    adev->fb_aper_size, adev->fb_aper_size, 0, 0, 0, &base)) {
			printf(": can't reserve framebuffer space\n");
			return;
		}
		pci_conf_write(pa->pa_pc, pa->pa_tag, AMDGPU_PCI_MEM, base);
		if (PCI_MAPREG_MEM_TYPE(type) == PCI_MAPREG_MEM_TYPE_64BIT)
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    AMDGPU_PCI_MEM + 4, (uint64_t)base >> 32);
		adev->fb_aper_offset = base;
	}

	if (adev->family >= CHIP_BONAIRE) {
		type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x18);
		if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
		    pci_mapreg_map(pa, 0x18, type, BUS_SPACE_MAP_LINEAR,
		    &adev->doorbell.bst, &adev->doorbell.bsh,
		    &adev->doorbell.base, &adev->doorbell.size, 0)) {
			printf(": can't map doorbell space\n");
			return;
		}
		adev->doorbell.ptr = bus_space_vaddr(adev->doorbell.bst,
		    adev->doorbell.bsh);
	}

	if (adev->family >= CHIP_BONAIRE)
		rmmio_bar = 0x24;
	else
		rmmio_bar = 0x18;

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rmmio_bar);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_map(pa, rmmio_bar, type, BUS_SPACE_MAP_LINEAR,
	    &adev->rmmio_bst, &adev->rmmio_bsh, &adev->rmmio_base,
	    &adev->rmmio_size, 0)) {
		printf(": can't map rmmio space\n");
		return;
	}
	adev->rmmio = bus_space_vaddr(adev->rmmio_bst, adev->rmmio_bsh);

	/*
	 * Make sure we have a base address for the ROM such that we
	 * can map it later.
	 */
	/*
	 * Make sure we have a base address for the ROM such that we
	 * can map it later.
	 */
	s = splhigh();
	addr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
	mask = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, addr);
	splx(s);

	if (addr == 0 && PCI_ROM_SIZE(mask) != 0 && pa->pa_memex) {
		bus_size_t size, start, end;
		bus_addr_t base;

		size = PCI_ROM_SIZE(mask);
		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
		end = min(PCI_MEM_END, pa->pa_memex->ex_end);
		if (extent_alloc_subregion(pa->pa_memex, start, end, size,
		    size, 0, 0, 0, &base) == 0)
			pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, base);
	}

	printf("\n");

	/* from amdgpu_pci_probe() */

	if (!amdgpu_virtual_display &&
	    amdgpu_device_asic_has_dc_support(adev->family))
		supports_atomic = true;

	if ((adev->flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n");
		return;
	}

	/*
	 * Initialize amdkfd before the rest of the GPU.
	 */
	amdgpu_amdkfd_init();

	dev = drm_attach_pci(&amdgpu_kms_driver, pa, 0, adev->primary,
	    self, &adev->ddev);
	if (dev == NULL) {
		printf("%s: drm attach failed\n", adev->self.dv_xname);
		return;
	}
	adev->pdev = dev->pdev;
	adev->is_fw_fb = adev->primary;

	if (!supports_atomic)
		dev->driver_features &= ~DRIVER_ATOMIC;

	if (!amdgpu_msi_ok(adev))
		pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* from amdgpu_init() */
	if (amdgpu_refcnt == 0) {
		drm_sched_fence_slab_init();

		if (amdgpu_sync_init()) {
			printf(": amdgpu_sync_init failed\n");
			return;
		}

		if (amdgpu_fence_slab_init()) {
			amdgpu_sync_fini();
			printf(": amdgpu_fence_slab_init failed\n");
			return;
		}

		amdgpu_register_atpx_handler();
		amdgpu_acpi_detect();
	}
	amdgpu_refcnt++;

	adev->irq.msi_enabled = false;
	if (pci_intr_map_msi(pa, &adev->intrh) == 0)
		adev->irq.msi_enabled = true;
	else if (pci_intr_map(pa, &adev->intrh) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	printf("%s: %s\n", adev->self.dv_xname,
	    pci_intr_string(pa->pa_pc, adev->intrh));

	adev->irqh = pci_intr_establish(pa->pa_pc, adev->intrh, IPL_TTY,
	    amdgpu_irq_handler, &adev->ddev, adev->self.dv_xname);
	if (adev->irqh == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    adev->self.dv_xname);
		return;
	}
	adev->pdev->irq = 0;

	fb_aper = bus_space_mmap(adev->memt, adev->fb_aper_offset, 0, 0, 0);
	if (fb_aper != -1)
		rasops_claim_framebuffer(fb_aper, adev->fb_aper_size, self);

	adev->shutdown = true;
	config_mountroot(self, amdgpu_attachhook);
}
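
/*
 * Detach again after a fatal error in the mountroot hook and, where
 * possible, hand the display back to a generic console driver: by
 * reprobing the device on non-EFI systems, or by reattaching efifb.
 */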
int
amdgpu_forcedetach(struct amdgpu_device *adev)
{
	struct pci_softc *sc = (struct pci_softc *)adev->self.dv_parent;
	pcitag_t tag = adev->pa_tag;

#if NVGA > 0
	if (adev->primary)
		vga_console_attached = 0;
#endif

	/* reprobe pci device for non efi systems */
#if NEFIFB > 0
	if (bios_efiinfo == NULL && !efifb_cb_found()) {
#endif
		config_detach(&adev->self, 0);
		return pci_probe_device(sc, tag, NULL, NULL);
#if NEFIFB > 0
	} else if (adev->primary) {
		efifb_reattach();
	}
#endif

	return 0;
}

void amdgpu_burner(void *, u_int, u_int);
int amdgpu_wsioctl(void *, u_long, caddr_t, int, struct proc *);
paddr_t amdgpu_wsmmap(void *, off_t, int);
int amdgpu_alloc_screen(void *, const struct wsscreen_descr *,
    void **, int *, int *, uint32_t *);
void amdgpu_free_screen(void *, void *);
int amdgpu_show_screen(void *, void *, int,
    void (*)(void *, int, int), void *);
void amdgpu_doswitch(void *);
void amdgpu_enter_ddb(void *, void *);

struct wsscreen_descr amdgpu_stdscreen = {
	"std",
	0, 0,
	0,
	0, 0,
	WSSCREEN_UNDERLINE | WSSCREEN_HILIT |
	WSSCREEN_REVERSE | WSSCREEN_WSCOLORS
};

const struct wsscreen_descr *amdgpu_scrlist[] = {
	&amdgpu_stdscreen,
};

struct wsscreen_list amdgpu_screenlist = {
	nitems(amdgpu_scrlist), amdgpu_scrlist
};

struct wsdisplay_accessops amdgpu_accessops = {
	.ioctl = amdgpu_wsioctl,
	.mmap = amdgpu_wsmmap,
	.alloc_screen = amdgpu_alloc_screen,
	.free_screen = amdgpu_free_screen,
	.show_screen = amdgpu_show_screen,
	.enter_ddb = amdgpu_enter_ddb,
	.getchar = rasops_getchar,
	.load_font = rasops_load_font,
	.list_font = rasops_list_font,
	.scrollback = rasops_scrollback,
	.burn_screen = amdgpu_burner
};
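
/*
 * wsdisplay ioctl handler.  Brightness is routed to the DRM backlight
 * device registered by the display code, so (assuming the standard
 * wsconsctl(8) interface) something like "wsconsctl display.brightness"
 * ends up here via WSDISPLAYIO_GETPARAM/WSDISPLAYIO_SETPARAM.
 */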
int
amdgpu_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct rasops_info *ri = v;
	struct amdgpu_device *adev = ri->ri_hw;
	struct backlight_device *bd = adev->dm.backlight_dev[0];
	struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
	struct wsdisplay_fbinfo *wdf;

	switch (cmd) {
	case WSDISPLAYIO_GTYPE:
		*(u_int *)data = WSDISPLAY_TYPE_RADEONDRM;
		return 0;
	case WSDISPLAYIO_GINFO:
		wdf = (struct wsdisplay_fbinfo *)data;
		wdf->width = ri->ri_width;
		wdf->height = ri->ri_height;
		wdf->depth = ri->ri_depth;
		wdf->cmsize = 0;
		return 0;
	case WSDISPLAYIO_GETPARAM:
		if (bd == NULL)
			return -1;

		switch (dp->param) {
		case WSDISPLAYIO_PARAM_BRIGHTNESS:
			dp->min = 0;
			dp->max = bd->props.max_brightness;
			dp->curval = bd->props.brightness;
			return (dp->max > dp->min) ? 0 : -1;
		}
		break;
	case WSDISPLAYIO_SETPARAM:
		if (bd == NULL || dp->curval > bd->props.max_brightness)
			return -1;

		switch (dp->param) {
		case WSDISPLAYIO_PARAM_BRIGHTNESS:
			bd->props.brightness = dp->curval;
			backlight_update_status(bd);
			return 0;
		}
		break;
	}

	return (-1);
}

paddr_t
amdgpu_wsmmap(void *v, off_t off, int prot)
{
	return (-1);
}

int
amdgpu_alloc_screen(void *v, const struct wsscreen_descr *type,
    void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
{
	return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp);
}

void
amdgpu_free_screen(void *v, void *cookie)
{
	return rasops_free_screen(v, cookie);
}

int
amdgpu_show_screen(void *v, void *cookie, int waitok,
    void (*cb)(void *, int, int), void *cbarg)
{
	struct rasops_info *ri = v;
	struct amdgpu_device *adev = ri->ri_hw;

	if (cookie == ri->ri_active)
		return (0);

	adev->switchcb = cb;
	adev->switchcbarg = cbarg;
	adev->switchcookie = cookie;
	if (cb) {
		task_add(systq, &adev->switchtask);
		return (EAGAIN);
	}

	amdgpu_doswitch(v);

	return (0);
}

void
amdgpu_doswitch(void *v)
{
	struct rasops_info *ri = v;
	struct amdgpu_device *adev = ri->ri_hw;

	rasops_show_screen(ri, adev->switchcookie, 0, NULL, NULL);
	drm_fb_helper_restore_fbdev_mode_unlocked((void *)adev->mode_info.rfbdev);

	if (adev->switchcb)
		(adev->switchcb)(adev->switchcbarg, 0, 0);
}

void
amdgpu_enter_ddb(void *v, void *cookie)
{
	struct rasops_info *ri = v;
	struct amdgpu_device *adev = ri->ri_hw;
	struct drm_fb_helper *fb_helper = (void *)adev->mode_info.rfbdev;

	if (cookie == ri->ri_active)
		return;

	rasops_show_screen(ri, cookie, 0, NULL, NULL);
	drm_fb_helper_debug_enter(fb_helper->fbdev);
}
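
/*
 * Mountroot hook: the bulk of device initialization is deferred to
 * this point because amdgpu_device_init() needs firmware loaded from
 * the filesystem.  On failure, amdgpu_fatal_error is set and the
 * device is forcibly detached.
 */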
void
amdgpu_attachhook(struct device *self)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)self;
	struct drm_device *dev = &adev->ddev;
	int r, acpi_status;

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((adev->flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		adev->flags |= AMD_IS_PX;

	/*
	 * amdgpu_device_init() should report only fatal errors such as
	 * memory allocation, iomapping or memory manager initialization
	 * failures.  It must properly initialize the GPU MC controller
	 * and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, adev->flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
		adev->runpm = true;
	else if (amdgpu_device_supports_baco(dev) &&
	    (amdgpu_runtime_pm != 0) &&
	    (adev->asic_type >= CHIP_TOPAZ) &&
	    (adev->asic_type != CHIP_VEGA10) &&
	    (adev->asic_type != CHIP_VEGA20) &&
	    (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
		adev->runpm = true;
	else if (amdgpu_device_supports_baco(dev) &&
	    (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
		adev->runpm = true;

	/*
	 * Call ACPI methods: this requires modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
			    "Error during ACPI methods call\n");
	}

	if (adev->runpm) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

{
	struct wsemuldisplaydev_attach_args aa;
	struct rasops_info *ri = &adev->ro;

	task_set(&adev->switchtask, amdgpu_doswitch, ri);

	if (ri->ri_bits == NULL)
		return;

	ri->ri_flg = RI_CENTER | RI_VCONS | RI_WRONLY;
	rasops_init(ri, 160, 160);

	ri->ri_hw = adev;

	amdgpu_stdscreen.capabilities = ri->ri_caps;
	amdgpu_stdscreen.nrows = ri->ri_rows;
	amdgpu_stdscreen.ncols = ri->ri_cols;
	amdgpu_stdscreen.textops = &ri->ri_ops;
	amdgpu_stdscreen.fontwidth = ri->ri_font->fontwidth;
	amdgpu_stdscreen.fontheight = ri->ri_font->fontheight;

	aa.console = adev->console;
	aa.primary = adev->primary;
	aa.scrdata = &amdgpu_screenlist;
	aa.accessops = &amdgpu_accessops;
	aa.accesscookie = ri;
	aa.defaultscreens = 0;

	if (adev->console) {
		uint32_t defattr;

		ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
		wsdisplay_cnattach(&amdgpu_stdscreen, ri->ri_active,
		    ri->ri_ccol, ri->ri_crow, defattr);
	}

	/*
	 * Now that we've taken over the console, disable decoding of
	 * VGA legacy addresses, and opt out of arbitration.
	 */
	amdgpu_asic_set_vga_state(adev, false);
	pci_disable_legacy_vga(&adev->self);

	printf("%s: %dx%d, %dbpp\n", adev->self.dv_xname,
	    ri->ri_width, ri->ri_height, ri->ri_depth);

	config_found_sm(&adev->self, &aa, wsemuldisplaydevprint,
	    wsemuldisplaydevsubmatch);

	/*
	 * On Linux this happens in amdgpu_pci_probe() via
	 * drm_dev_register().
	 */
	drm_dev_register(dev, adev->flags);
}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_fatal_error = 1;
		amdgpu_forcedetach(adev);
	}
}

/* from amdgpu_exit() and amdgpu_driver_unload_kms() */
int
amdgpu_detach(struct device *self, int flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)self;
	struct drm_device *dev = &adev->ddev;

	if (adev == NULL)
		return 0;

	amdgpu_refcnt--;

	if (amdgpu_refcnt == 0)
		amdgpu_amdkfd_fini();

	pci_intr_disestablish(adev->pc, adev->irqh);

	amdgpu_unregister_gpu_instance(adev);

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);

	if (amdgpu_refcnt == 0) {
		amdgpu_unregister_atpx_handler();
		amdgpu_sync_fini();
		amdgpu_fence_slab_fini();

		drm_sched_fence_slab_fini();
	}

	config_detach(adev->ddev.dev, flags);

	return 0;
}
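
/*
 * Power-state transitions.  The device is suspended at DVACT_QUIESCE
 * (children first) and resumed at DVACT_WAKEUP (children last);
 * DVACT_SUSPEND and DVACT_RESUME are left as no-ops, presumably
 * because the suspend/resume paths need to sleep, which is not safe
 * at those stages.
 */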
int
amdgpu_activate(struct device *self, int act)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)self;
	struct drm_device *dev = &adev->ddev;
	int rv = 0;

	if (dev->dev == NULL)
		return (0);

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		amdgpu_device_suspend(dev, true);
		break;
	case DVACT_SUSPEND:
		break;
	case DVACT_RESUME:
		break;
	case DVACT_WAKEUP:
		amdgpu_device_resume(dev, true);
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}