/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
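
/*
 * The (void) read-backs of the index register above flush the posted
 * index write before the data register is touched.  The SMC, MP0PUB,
 * UVD_CTX, DIDT and GC_CAC accessors below use the same index/data
 * scheme, each serialized by its own spinlock; the _NO_KIQ variants
 * issue direct MMIO and bypass the KIQ-mediated register path used
 * under SR-IOV.
 */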

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
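
/*
 * Golden register settings.  Each table below is a flat array of
 * {register, and_mask, or_value} triplets; roughly,
 * amdgpu_device_program_register_sequence() clears the bits selected
 * by and_mask in the current register value and ORs in or_value.
 */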

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
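
/*
 * An illustrative use (the pattern followed by the gfx code) brackets
 * per-instance register programming with a select/restore pair under
 * srbm_mutex:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	...program the instanced registers...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */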

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
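
/*
 * Allowlist of registers that userspace may read through the
 * read_register asic callback (the AMDGPU_INFO ioctl path).  Entries
 * flagged true are GRBM-indexed, i.e. banked per SE/SH; several of
 * them are answered from values cached in adev->gfx.config at init
 * time instead of touching the hardware.
 */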

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - asic reset
 *
 * @adev: amdgpu_device pointer
 *
 * Hard resets the ASIC via the PCI config space reset method
 * (VI).
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
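
/*
 * UVD/VCE clock programming follows one sequence: ask the VBIOS (via
 * atombios) for divider settings for the requested clock, program the
 * post divider into the control register, then poll the status
 * register until the divider takes effect (100 tries, 10 ms apart,
 * i.e. roughly a one-second timeout).
 */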

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
#if 0
	if (pci_is_root_bus(adev->pdev->bus))
		return;
#endif

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
};
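
/* Bristol Ridge is a Carrizo refresh; these PCI revision ID ranges
 * identify it so that power gating can be enabled on it (see
 * vi_common_early_init()).
 */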
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	/* register 0x157a has no symbolic name in the headers;
	 * bit 0 toggles DRM light sleep
	 */
	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
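
/*
 * On the dGPU parts (Tonga/Polaris/VEGAM), system-block clockgating is
 * requested from the SMU rather than toggled by direct register
 * writes: each request packs the block, the states it supports and the
 * target state into a PP_CG_MSG_ID() message and hands it to the
 * powerplay set_clockgating_by_smu hook.
 */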

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
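
/*
 * IP blocks are added in init order: common first, then GMC, IH and
 * SMU, followed by display (virtual, DC or DCE depending on
 * configuration), GFX, SDMA, and finally the UVD/VCE multimedia
 * blocks and ACP where present.
 */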

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}