1 /* 2 * Copyright 2010 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 * $FreeBSD: head/sys/dev/drm2/radeon/ni.c 254885 2013-08-25 19:37:15Z dumbbell $ 24 */ 25 26 #include <drm/drmP.h> 27 #include "radeon.h" 28 #include "radeon_asic.h" 29 #include <uapi_drm/radeon_drm.h> 30 #include "nid.h" 31 #include "atom.h" 32 #include "ni_reg.h" 33 #include "cayman_blit_shaders.h" 34 35 extern bool evergreen_is_display_hung(struct radeon_device *rdev); 36 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 37 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 38 39 #define EVERGREEN_PFP_UCODE_SIZE 1120 40 #define EVERGREEN_PM4_UCODE_SIZE 1376 41 #define EVERGREEN_RLC_UCODE_SIZE 768 42 #define BTC_MC_UCODE_SIZE 6024 43 44 #define CAYMAN_PFP_UCODE_SIZE 2176 45 #define CAYMAN_PM4_UCODE_SIZE 2176 46 #define CAYMAN_RLC_UCODE_SIZE 1024 47 #define CAYMAN_MC_UCODE_SIZE 6037 48 49 #define ARUBA_RLC_UCODE_SIZE 1536 50 51 #define BTC_IO_MC_REGS_SIZE 29 52 53 static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 54 {0x00000077, 0xff010100}, 55 {0x00000078, 0x00000000}, 56 {0x00000079, 0x00001434}, 57 {0x0000007a, 0xcc08ec08}, 58 {0x0000007b, 0x00040000}, 59 {0x0000007c, 0x000080c0}, 60 {0x0000007d, 0x09000000}, 61 {0x0000007e, 0x00210404}, 62 {0x00000081, 0x08a8e800}, 63 {0x00000082, 0x00030444}, 64 {0x00000083, 0x00000000}, 65 {0x00000085, 0x00000001}, 66 {0x00000086, 0x00000002}, 67 {0x00000087, 0x48490000}, 68 {0x00000088, 0x20244647}, 69 {0x00000089, 0x00000005}, 70 {0x0000008b, 0x66030000}, 71 {0x0000008c, 0x00006603}, 72 {0x0000008d, 0x00000100}, 73 {0x0000008f, 0x00001c0a}, 74 {0x00000090, 0xff000001}, 75 {0x00000094, 0x00101101}, 76 {0x00000095, 0x00000fff}, 77 {0x00000096, 0x00116fff}, 78 {0x00000097, 0x60010000}, 79 {0x00000098, 0x10010000}, 80 {0x00000099, 0x00006000}, 81 {0x0000009a, 0x00001000}, 82 {0x0000009f, 0x00946a00} 83 }; 84 85 static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 86 {0x00000077, 0xff010100}, 87 {0x00000078, 0x00000000}, 88 {0x00000079, 0x00001434}, 89 {0x0000007a, 0xcc08ec08}, 90 {0x0000007b, 0x00040000}, 91 {0x0000007c, 0x000080c0}, 92 {0x0000007d, 0x09000000}, 93 {0x0000007e, 0x00210404}, 94 {0x00000081, 0x08a8e800}, 95 {0x00000082, 0x00030444}, 96 {0x00000083, 0x00000000}, 97 {0x00000085, 0x00000001}, 98 {0x00000086, 0x00000002}, 99 {0x00000087, 0x48490000}, 100 {0x00000088, 0x20244647}, 101 {0x00000089, 0x00000005}, 102 {0x0000008b, 0x66030000}, 103 {0x0000008c, 0x00006603}, 104 {0x0000008d, 0x00000100}, 105 {0x0000008f, 0x00001c0a}, 106 {0x00000090, 0xff000001}, 107 {0x00000094, 0x00101101}, 108 {0x00000095, 0x00000fff}, 109 {0x00000096, 0x00116fff}, 110 {0x00000097, 0x60010000}, 111 {0x00000098, 0x10010000}, 112 {0x00000099, 0x00006000}, 113 {0x0000009a, 0x00001000}, 114 {0x0000009f, 0x00936a00} 115 }; 116 117 static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 118 {0x00000077, 0xff010100}, 119 {0x00000078, 0x00000000}, 120 {0x00000079, 0x00001434}, 121 {0x0000007a, 0xcc08ec08}, 122 {0x0000007b, 0x00040000}, 123 {0x0000007c, 0x000080c0}, 124 {0x0000007d, 0x09000000}, 125 {0x0000007e, 0x00210404}, 126 {0x00000081, 0x08a8e800}, 127 {0x00000082, 0x00030444}, 128 {0x00000083, 0x00000000}, 129 {0x00000085, 0x00000001}, 130 {0x00000086, 0x00000002}, 131 {0x00000087, 0x48490000}, 132 {0x00000088, 0x20244647}, 133 {0x00000089, 0x00000005}, 134 {0x0000008b, 0x66030000}, 135 {0x0000008c, 0x00006603}, 136 {0x0000008d, 0x00000100}, 137 {0x0000008f, 0x00001c0a}, 138 {0x00000090, 0xff000001}, 139 {0x00000094, 0x00101101}, 140 {0x00000095, 0x00000fff}, 141 {0x00000096, 0x00116fff}, 142 {0x00000097, 0x60010000}, 143 {0x00000098, 0x10010000}, 144 {0x00000099, 0x00006000}, 145 {0x0000009a, 0x00001000}, 146 {0x0000009f, 0x00916a00} 147 }; 148 149 static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 150 {0x00000077, 0xff010100}, 151 {0x00000078, 0x00000000}, 152 {0x00000079, 0x00001434}, 153 {0x0000007a, 0xcc08ec08}, 154 {0x0000007b, 0x00040000}, 155 {0x0000007c, 0x000080c0}, 156 {0x0000007d, 0x09000000}, 157 {0x0000007e, 0x00210404}, 158 {0x00000081, 0x08a8e800}, 159 {0x00000082, 0x00030444}, 160 {0x00000083, 0x00000000}, 161 {0x00000085, 0x00000001}, 162 {0x00000086, 0x00000002}, 163 {0x00000087, 0x48490000}, 164 {0x00000088, 0x20244647}, 165 {0x00000089, 0x00000005}, 166 {0x0000008b, 0x66030000}, 167 {0x0000008c, 0x00006603}, 168 {0x0000008d, 0x00000100}, 169 {0x0000008f, 0x00001c0a}, 170 {0x00000090, 0xff000001}, 171 {0x00000094, 0x00101101}, 172 {0x00000095, 0x00000fff}, 173 {0x00000096, 0x00116fff}, 174 {0x00000097, 0x60010000}, 175 {0x00000098, 0x10010000}, 176 {0x00000099, 0x00006000}, 177 {0x0000009a, 0x00001000}, 178 {0x0000009f, 0x00976b00} 179 }; 180 181 int ni_mc_load_microcode(struct radeon_device *rdev) 182 { 183 const __be32 *fw_data; 184 u32 mem_type, running, blackout = 0; 185 u32 *io_mc_regs; 186 int i, ucode_size, regs_size; 187 188 if (!rdev->mc_fw) 189 return -EINVAL; 190 191 switch (rdev->family) { 192 case CHIP_BARTS: 193 io_mc_regs = (u32 *)&barts_io_mc_regs; 194 ucode_size = BTC_MC_UCODE_SIZE; 195 regs_size = BTC_IO_MC_REGS_SIZE; 196 break; 197 case CHIP_TURKS: 198 io_mc_regs = (u32 *)&turks_io_mc_regs; 199 ucode_size = BTC_MC_UCODE_SIZE; 200 regs_size = BTC_IO_MC_REGS_SIZE; 201 break; 202 case CHIP_CAICOS: 203 default: 204 io_mc_regs = (u32 *)&caicos_io_mc_regs; 205 ucode_size = BTC_MC_UCODE_SIZE; 206 regs_size = BTC_IO_MC_REGS_SIZE; 207 break; 208 case CHIP_CAYMAN: 209 io_mc_regs = (u32 *)&cayman_io_mc_regs; 210 ucode_size = CAYMAN_MC_UCODE_SIZE; 211 regs_size = BTC_IO_MC_REGS_SIZE; 212 break; 213 } 214 215 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT; 216 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 217 218 if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) { 219 if (running) { 220 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); 221 WREG32(MC_SHARED_BLACKOUT_CNTL, 1); 222 } 223 224 /* reset the engine and set to writable */ 225 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 226 WREG32(MC_SEQ_SUP_CNTL, 0x00000010); 227 228 /* load mc io regs */ 229 for (i = 0; i < regs_size; i++) { 230 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 231 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 232 } 233 /* load the MC ucode */ 234 fw_data = (const __be32 *)rdev->mc_fw->data; 235 for (i = 0; i < ucode_size; i++) 236 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); 237 238 /* put the engine back into the active state */ 239 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 240 WREG32(MC_SEQ_SUP_CNTL, 0x00000004); 241 WREG32(MC_SEQ_SUP_CNTL, 0x00000001); 242 243 /* wait for training to complete */ 244 for (i = 0; i < rdev->usec_timeout; i++) { 245 if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD) 246 break; 247 DRM_UDELAY(1); 248 } 249 250 if (running) 251 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); 252 } 253 254 return 0; 255 } 256 257 int ni_init_microcode(struct radeon_device *rdev) 258 { 259 const char *chip_name; 260 const char *rlc_chip_name; 261 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; 262 char fw_name[30]; 263 int err; 264 265 DRM_DEBUG("\n"); 266 267 switch (rdev->family) { 268 case CHIP_BARTS: 269 chip_name = "BARTS"; 270 rlc_chip_name = "BTC"; 271 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 272 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 273 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 274 mc_req_size = BTC_MC_UCODE_SIZE * 4; 275 break; 276 case CHIP_TURKS: 277 chip_name = "TURKS"; 278 rlc_chip_name = "BTC"; 279 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 280 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 281 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 282 mc_req_size = BTC_MC_UCODE_SIZE * 4; 283 break; 284 case CHIP_CAICOS: 285 chip_name = "CAICOS"; 286 rlc_chip_name = "BTC"; 287 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 288 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 289 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 290 mc_req_size = BTC_MC_UCODE_SIZE * 4; 291 break; 292 case CHIP_CAYMAN: 293 chip_name = "CAYMAN"; 294 rlc_chip_name = "CAYMAN"; 295 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4; 296 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 297 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; 298 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; 299 break; 300 case CHIP_ARUBA: 301 chip_name = "ARUBA"; 302 rlc_chip_name = "ARUBA"; 303 /* pfp/me same size as CAYMAN */ 304 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4; 305 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 306 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4; 307 mc_req_size = 0; 308 break; 309 default: panic("%s: Unsupported family %d", __func__, rdev->family); 310 } 311 312 DRM_INFO("Loading %s Microcode\n", chip_name); 313 err = 0; 314 315 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name); 316 rdev->pfp_fw = firmware_get(fw_name); 317 if (rdev->pfp_fw == NULL) { 318 err = -ENOENT; 319 goto out; 320 } 321 if (rdev->pfp_fw->datasize != pfp_req_size) { 322 DRM_ERROR( 323 "ni_cp: Bogus length %zu in firmware \"%s\"\n", 324 rdev->pfp_fw->datasize, fw_name); 325 err = -EINVAL; 326 goto out; 327 } 328 329 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name); 330 rdev->me_fw = firmware_get(fw_name); 331 if (rdev->me_fw == NULL) { 332 err = -ENOENT; 333 goto out; 334 } 335 if (rdev->me_fw->datasize != me_req_size) { 336 DRM_ERROR( 337 "ni_cp: Bogus length %zu in firmware \"%s\"\n", 338 rdev->me_fw->datasize, fw_name); 339 err = -EINVAL; 340 } 341 342 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", 343 rlc_chip_name); 344 rdev->rlc_fw = firmware_get(fw_name); 345 if (rdev->rlc_fw == NULL) { 346 err = -ENOENT; 347 goto out; 348 } 349 if (rdev->rlc_fw->datasize != rlc_req_size) { 350 DRM_ERROR( 351 "ni_rlc: Bogus length %zu in firmware \"%s\"\n", 352 rdev->rlc_fw->datasize, fw_name); 353 err = -EINVAL; 354 } 355 356 /* no MC ucode on TN */ 357 if (!(rdev->flags & RADEON_IS_IGP)) { 358 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", 359 chip_name); 360 rdev->mc_fw = firmware_get(fw_name); 361 if (rdev->mc_fw == NULL) { 362 err = -ENOENT; 363 goto out; 364 } 365 if (rdev->mc_fw->datasize != mc_req_size) { 366 DRM_ERROR( 367 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 368 rdev->mc_fw->datasize, fw_name); 369 err = -EINVAL; 370 } 371 } 372 out: 373 if (err) { 374 if (err != -EINVAL) 375 DRM_ERROR( 376 "ni_cp: Failed to load firmware \"%s\"\n", 377 fw_name); 378 if (rdev->pfp_fw != NULL) { 379 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 380 rdev->pfp_fw = NULL; 381 } 382 if (rdev->me_fw != NULL) { 383 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 384 rdev->me_fw = NULL; 385 } 386 if (rdev->rlc_fw != NULL) { 387 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 388 rdev->rlc_fw = NULL; 389 } 390 if (rdev->mc_fw != NULL) { 391 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 392 rdev->mc_fw = NULL; 393 } 394 } 395 return err; 396 } 397 398 /** 399 * ni_fini_microcode - drop the firmwares image references 400 * 401 * @rdev: radeon_device pointer 402 * 403 * Drop the pfp, me, mc and rlc firmwares image references. 404 * Called at driver shutdown. 405 */ 406 void ni_fini_microcode(struct radeon_device *rdev) 407 { 408 409 if (rdev->pfp_fw != NULL) { 410 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 411 rdev->pfp_fw = NULL; 412 } 413 414 if (rdev->me_fw != NULL) { 415 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 416 rdev->me_fw = NULL; 417 } 418 419 if (rdev->rlc_fw != NULL) { 420 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 421 rdev->rlc_fw = NULL; 422 } 423 424 if (rdev->mc_fw != NULL) { 425 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 426 rdev->mc_fw = NULL; 427 } 428 } 429 430 431 /* 432 * Core functions 433 */ 434 static void cayman_gpu_init(struct radeon_device *rdev) 435 { 436 u32 gb_addr_config = 0; 437 u32 mc_shared_chmap, mc_arb_ramcfg; 438 u32 cgts_tcc_disable; 439 u32 sx_debug_1; 440 u32 smx_dc_ctl0; 441 u32 cgts_sm_ctrl_reg; 442 u32 hdp_host_path_cntl; 443 u32 tmp; 444 u32 disabled_rb_mask; 445 int i, j; 446 447 switch (rdev->family) { 448 case CHIP_CAYMAN: 449 rdev->config.cayman.max_shader_engines = 2; 450 rdev->config.cayman.max_pipes_per_simd = 4; 451 rdev->config.cayman.max_tile_pipes = 8; 452 rdev->config.cayman.max_simds_per_se = 12; 453 rdev->config.cayman.max_backends_per_se = 4; 454 rdev->config.cayman.max_texture_channel_caches = 8; 455 rdev->config.cayman.max_gprs = 256; 456 rdev->config.cayman.max_threads = 256; 457 rdev->config.cayman.max_gs_threads = 32; 458 rdev->config.cayman.max_stack_entries = 512; 459 rdev->config.cayman.sx_num_of_sets = 8; 460 rdev->config.cayman.sx_max_export_size = 256; 461 rdev->config.cayman.sx_max_export_pos_size = 64; 462 rdev->config.cayman.sx_max_export_smx_size = 192; 463 rdev->config.cayman.max_hw_contexts = 8; 464 rdev->config.cayman.sq_num_cf_insts = 2; 465 466 rdev->config.cayman.sc_prim_fifo_size = 0x100; 467 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 468 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 469 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN; 470 break; 471 case CHIP_ARUBA: 472 default: 473 rdev->config.cayman.max_shader_engines = 1; 474 rdev->config.cayman.max_pipes_per_simd = 4; 475 rdev->config.cayman.max_tile_pipes = 2; 476 if ((rdev->ddev->pci_device == 0x9900) || 477 (rdev->ddev->pci_device == 0x9901) || 478 (rdev->ddev->pci_device == 0x9905) || 479 (rdev->ddev->pci_device == 0x9906) || 480 (rdev->ddev->pci_device == 0x9907) || 481 (rdev->ddev->pci_device == 0x9908) || 482 (rdev->ddev->pci_device == 0x9909) || 483 (rdev->ddev->pci_device == 0x990B) || 484 (rdev->ddev->pci_device == 0x990C) || 485 (rdev->ddev->pci_device == 0x990F) || 486 (rdev->ddev->pci_device == 0x9910) || 487 (rdev->ddev->pci_device == 0x9917) || 488 (rdev->ddev->pci_device == 0x9999)) { 489 rdev->config.cayman.max_simds_per_se = 6; 490 rdev->config.cayman.max_backends_per_se = 2; 491 } else if ((rdev->ddev->pci_device == 0x9903) || 492 (rdev->ddev->pci_device == 0x9904) || 493 (rdev->ddev->pci_device == 0x990A) || 494 (rdev->ddev->pci_device == 0x990D) || 495 (rdev->ddev->pci_device == 0x990E) || 496 (rdev->ddev->pci_device == 0x9913) || 497 (rdev->ddev->pci_device == 0x9918)) { 498 rdev->config.cayman.max_simds_per_se = 4; 499 rdev->config.cayman.max_backends_per_se = 2; 500 } else if ((rdev->ddev->pci_device == 0x9919) || 501 (rdev->ddev->pci_device == 0x9990) || 502 (rdev->ddev->pci_device == 0x9991) || 503 (rdev->ddev->pci_device == 0x9994) || 504 (rdev->ddev->pci_device == 0x9995) || 505 (rdev->ddev->pci_device == 0x9996) || 506 (rdev->ddev->pci_device == 0x999A) || 507 (rdev->ddev->pci_device == 0x99A0)) { 508 rdev->config.cayman.max_simds_per_se = 3; 509 rdev->config.cayman.max_backends_per_se = 1; 510 } else { 511 rdev->config.cayman.max_simds_per_se = 2; 512 rdev->config.cayman.max_backends_per_se = 1; 513 } 514 rdev->config.cayman.max_texture_channel_caches = 2; 515 rdev->config.cayman.max_gprs = 256; 516 rdev->config.cayman.max_threads = 256; 517 rdev->config.cayman.max_gs_threads = 32; 518 rdev->config.cayman.max_stack_entries = 512; 519 rdev->config.cayman.sx_num_of_sets = 8; 520 rdev->config.cayman.sx_max_export_size = 256; 521 rdev->config.cayman.sx_max_export_pos_size = 64; 522 rdev->config.cayman.sx_max_export_smx_size = 192; 523 rdev->config.cayman.max_hw_contexts = 8; 524 rdev->config.cayman.sq_num_cf_insts = 2; 525 526 rdev->config.cayman.sc_prim_fifo_size = 0x40; 527 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 528 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 529 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN; 530 break; 531 } 532 533 /* Initialize HDP */ 534 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 535 WREG32((0x2c14 + j), 0x00000000); 536 WREG32((0x2c18 + j), 0x00000000); 537 WREG32((0x2c1c + j), 0x00000000); 538 WREG32((0x2c20 + j), 0x00000000); 539 WREG32((0x2c24 + j), 0x00000000); 540 } 541 542 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 543 544 evergreen_fix_pci_max_read_req_size(rdev); 545 546 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 547 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 548 549 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 550 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 551 if (rdev->config.cayman.mem_row_size_in_kb > 4) 552 rdev->config.cayman.mem_row_size_in_kb = 4; 553 /* XXX use MC settings? */ 554 rdev->config.cayman.shader_engine_tile_size = 32; 555 rdev->config.cayman.num_gpus = 1; 556 rdev->config.cayman.multi_gpu_tile_size = 64; 557 558 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; 559 rdev->config.cayman.num_tile_pipes = (1 << tmp); 560 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 561 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256; 562 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; 563 rdev->config.cayman.num_shader_engines = tmp + 1; 564 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; 565 rdev->config.cayman.num_gpus = tmp + 1; 566 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; 567 rdev->config.cayman.multi_gpu_tile_size = 1 << tmp; 568 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; 569 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; 570 571 572 /* setup tiling info dword. gb_addr_config is not adequate since it does 573 * not have bank info, so create a custom tiling dword. 574 * bits 3:0 num_pipes 575 * bits 7:4 num_banks 576 * bits 11:8 group_size 577 * bits 15:12 row_size 578 */ 579 rdev->config.cayman.tile_config = 0; 580 switch (rdev->config.cayman.num_tile_pipes) { 581 case 1: 582 default: 583 rdev->config.cayman.tile_config |= (0 << 0); 584 break; 585 case 2: 586 rdev->config.cayman.tile_config |= (1 << 0); 587 break; 588 case 4: 589 rdev->config.cayman.tile_config |= (2 << 0); 590 break; 591 case 8: 592 rdev->config.cayman.tile_config |= (3 << 0); 593 break; 594 } 595 596 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 597 if (rdev->flags & RADEON_IS_IGP) 598 rdev->config.cayman.tile_config |= 1 << 4; 599 else { 600 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { 601 case 0: /* four banks */ 602 rdev->config.cayman.tile_config |= 0 << 4; 603 break; 604 case 1: /* eight banks */ 605 rdev->config.cayman.tile_config |= 1 << 4; 606 break; 607 case 2: /* sixteen banks */ 608 default: 609 rdev->config.cayman.tile_config |= 2 << 4; 610 break; 611 } 612 } 613 rdev->config.cayman.tile_config |= 614 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 615 rdev->config.cayman.tile_config |= 616 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 617 618 tmp = 0; 619 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) { 620 u32 rb_disable_bitmap; 621 622 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 623 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 624 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; 625 tmp <<= 4; 626 tmp |= rb_disable_bitmap; 627 } 628 /* enabled rb are just the one not disabled :) */ 629 disabled_rb_mask = tmp; 630 631 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 632 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 633 634 WREG32(GB_ADDR_CONFIG, gb_addr_config); 635 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 636 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 637 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 638 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 639 640 if ((rdev->config.cayman.max_backends_per_se == 1) && 641 (rdev->flags & RADEON_IS_IGP)) { 642 if ((disabled_rb_mask & 3) == 1) { 643 /* RB0 disabled, RB1 enabled */ 644 tmp = 0x11111111; 645 } else { 646 /* RB1 disabled, RB0 enabled */ 647 tmp = 0x00000000; 648 } 649 } else { 650 tmp = gb_addr_config & NUM_PIPES_MASK; 651 tmp = r6xx_remap_render_backend(rdev, tmp, 652 rdev->config.cayman.max_backends_per_se * 653 rdev->config.cayman.max_shader_engines, 654 CAYMAN_MAX_BACKENDS, disabled_rb_mask); 655 } 656 WREG32(GB_BACKEND_MAP, tmp); 657 658 cgts_tcc_disable = 0xffff0000; 659 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) 660 cgts_tcc_disable &= ~(1 << (16 + i)); 661 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 662 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); 663 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); 664 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 665 666 /* reprogram the shader complex */ 667 cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG); 668 for (i = 0; i < 16; i++) 669 WREG32(CGTS_SM_CTRL_REG, OVERRIDE); 670 WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg); 671 672 /* set HW defaults for 3D engine */ 673 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 674 675 sx_debug_1 = RREG32(SX_DEBUG_1); 676 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 677 WREG32(SX_DEBUG_1, sx_debug_1); 678 679 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 680 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 681 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); 682 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 683 684 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); 685 686 /* need to be explicitly zero-ed */ 687 WREG32(VGT_OFFCHIP_LDS_BASE, 0); 688 WREG32(SQ_LSTMP_RING_BASE, 0); 689 WREG32(SQ_HSTMP_RING_BASE, 0); 690 WREG32(SQ_ESTMP_RING_BASE, 0); 691 WREG32(SQ_GSTMP_RING_BASE, 0); 692 WREG32(SQ_VSTMP_RING_BASE, 0); 693 WREG32(SQ_PSTMP_RING_BASE, 0); 694 695 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); 696 697 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | 698 POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | 699 SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); 700 701 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | 702 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | 703 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); 704 705 706 WREG32(VGT_NUM_INSTANCES, 1); 707 708 WREG32(CP_PERFMON_CNTL, 0); 709 710 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | 711 FETCH_FIFO_HIWATER(0x4) | 712 DONE_FIFO_HIWATER(0xe0) | 713 ALU_UPDATE_FIFO_HIWATER(0x8))); 714 715 WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4)); 716 WREG32(SQ_CONFIG, (VC_ENABLE | 717 EXPORT_SRC_C | 718 GFX_PRIO(0) | 719 CS1_PRIO(0) | 720 CS2_PRIO(1))); 721 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE); 722 723 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 724 FORCE_EOV_MAX_REZ_CNT(255))); 725 726 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | 727 AUTO_INVLD_EN(ES_AND_GS_AUTO)); 728 729 WREG32(VGT_GS_VERTEX_REUSE, 16); 730 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 731 732 WREG32(CB_PERF_CTR0_SEL_0, 0); 733 WREG32(CB_PERF_CTR0_SEL_1, 0); 734 WREG32(CB_PERF_CTR1_SEL_0, 0); 735 WREG32(CB_PERF_CTR1_SEL_1, 0); 736 WREG32(CB_PERF_CTR2_SEL_0, 0); 737 WREG32(CB_PERF_CTR2_SEL_1, 0); 738 WREG32(CB_PERF_CTR3_SEL_0, 0); 739 WREG32(CB_PERF_CTR3_SEL_1, 0); 740 741 tmp = RREG32(HDP_MISC_CNTL); 742 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 743 WREG32(HDP_MISC_CNTL, tmp); 744 745 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 746 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 747 748 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 749 750 DRM_UDELAY(50); 751 } 752 753 /* 754 * GART 755 */ 756 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev) 757 { 758 /* flush hdp cache */ 759 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 760 761 /* bits 0-7 are the VM contexts0-7 */ 762 WREG32(VM_INVALIDATE_REQUEST, 1); 763 } 764 765 static int cayman_pcie_gart_enable(struct radeon_device *rdev) 766 { 767 int i, r; 768 769 if (rdev->gart.robj == NULL) { 770 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 771 return -EINVAL; 772 } 773 r = radeon_gart_table_vram_pin(rdev); 774 if (r) 775 return r; 776 radeon_gart_restore(rdev); 777 /* Setup TLB control */ 778 WREG32(MC_VM_MX_L1_TLB_CNTL, 779 (0xA << 7) | 780 ENABLE_L1_TLB | 781 ENABLE_L1_FRAGMENT_PROCESSING | 782 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 783 ENABLE_ADVANCED_DRIVER_MODEL | 784 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 785 /* Setup L2 cache */ 786 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 787 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 788 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 789 EFFECTIVE_L2_QUEUE_SIZE(7) | 790 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 791 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 792 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 793 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 794 /* setup context0 */ 795 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 796 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 797 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 798 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 799 (u32)(rdev->dummy_page.addr >> 12)); 800 WREG32(VM_CONTEXT0_CNTL2, 0); 801 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 802 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 803 804 WREG32(0x15D4, 0); 805 WREG32(0x15D8, 0); 806 WREG32(0x15DC, 0); 807 808 /* empty context1-7 */ 809 /* Assign the pt base to something valid for now; the pts used for 810 * the VMs are determined by the application and setup and assigned 811 * on the fly in the vm part of radeon_gart.c 812 */ 813 for (i = 1; i < 8; i++) { 814 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); 815 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); 816 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 817 rdev->gart.table_addr >> 12); 818 } 819 820 /* enable context1-7 */ 821 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 822 (u32)(rdev->dummy_page.addr >> 12)); 823 WREG32(VM_CONTEXT1_CNTL2, 4); 824 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 825 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 826 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 827 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 828 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | 829 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | 830 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | 831 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | 832 VALID_PROTECTION_FAULT_ENABLE_DEFAULT | 833 READ_PROTECTION_FAULT_ENABLE_INTERRUPT | 834 READ_PROTECTION_FAULT_ENABLE_DEFAULT | 835 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | 836 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); 837 838 cayman_pcie_gart_tlb_flush(rdev); 839 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 840 (unsigned)(rdev->mc.gtt_size >> 20), 841 (unsigned long long)rdev->gart.table_addr); 842 rdev->gart.ready = true; 843 return 0; 844 } 845 846 static void cayman_pcie_gart_disable(struct radeon_device *rdev) 847 { 848 /* Disable all tables */ 849 WREG32(VM_CONTEXT0_CNTL, 0); 850 WREG32(VM_CONTEXT1_CNTL, 0); 851 /* Setup TLB control */ 852 WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING | 853 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 854 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 855 /* Setup L2 cache */ 856 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 857 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 858 EFFECTIVE_L2_QUEUE_SIZE(7) | 859 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 860 WREG32(VM_L2_CNTL2, 0); 861 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 862 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 863 radeon_gart_table_vram_unpin(rdev); 864 } 865 866 static void cayman_pcie_gart_fini(struct radeon_device *rdev) 867 { 868 cayman_pcie_gart_disable(rdev); 869 radeon_gart_table_vram_free(rdev); 870 radeon_gart_fini(rdev); 871 } 872 873 void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 874 int ring, u32 cp_int_cntl) 875 { 876 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; 877 878 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); 879 WREG32(CP_INT_CNTL, cp_int_cntl); 880 } 881 882 /* 883 * CP. 884 */ 885 void cayman_fence_ring_emit(struct radeon_device *rdev, 886 struct radeon_fence *fence) 887 { 888 struct radeon_ring *ring = &rdev->ring[fence->ring]; 889 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 890 891 /* flush read cache over gart for this vmid */ 892 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 893 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 894 radeon_ring_write(ring, 0); 895 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 896 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); 897 radeon_ring_write(ring, 0xFFFFFFFF); 898 radeon_ring_write(ring, 0); 899 radeon_ring_write(ring, 10); /* poll interval */ 900 /* EVENT_WRITE_EOP - flush caches, send int */ 901 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 902 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 903 radeon_ring_write(ring, addr & 0xffffffff); 904 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 905 radeon_ring_write(ring, fence->seq); 906 radeon_ring_write(ring, 0); 907 } 908 909 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 910 { 911 struct radeon_ring *ring = &rdev->ring[ib->ring]; 912 913 /* set to DX10/11 mode */ 914 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); 915 radeon_ring_write(ring, 1); 916 917 if (ring->rptr_save_reg) { 918 uint32_t next_rptr = ring->wptr + 3 + 4 + 8; 919 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 920 radeon_ring_write(ring, ((ring->rptr_save_reg - 921 PACKET3_SET_CONFIG_REG_START) >> 2)); 922 radeon_ring_write(ring, next_rptr); 923 } 924 925 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 926 radeon_ring_write(ring, 927 #ifdef __BIG_ENDIAN 928 (2 << 0) | 929 #endif 930 (ib->gpu_addr & 0xFFFFFFFC)); 931 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 932 radeon_ring_write(ring, ib->length_dw | 933 (ib->vm ? (ib->vm->id << 24) : 0)); 934 935 /* flush read cache over gart for this vmid */ 936 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 937 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 938 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); 939 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 940 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); 941 radeon_ring_write(ring, 0xFFFFFFFF); 942 radeon_ring_write(ring, 0); 943 radeon_ring_write(ring, 10); /* poll interval */ 944 } 945 946 static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 947 { 948 if (enable) 949 WREG32(CP_ME_CNTL, 0); 950 else { 951 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 952 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 953 WREG32(SCRATCH_UMSK, 0); 954 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 955 } 956 } 957 958 static int cayman_cp_load_microcode(struct radeon_device *rdev) 959 { 960 const __be32 *fw_data; 961 int i; 962 963 if (!rdev->me_fw || !rdev->pfp_fw) 964 return -EINVAL; 965 966 cayman_cp_enable(rdev, false); 967 968 fw_data = (const __be32 *)rdev->pfp_fw->data; 969 WREG32(CP_PFP_UCODE_ADDR, 0); 970 for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++) 971 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 972 WREG32(CP_PFP_UCODE_ADDR, 0); 973 974 fw_data = (const __be32 *)rdev->me_fw->data; 975 WREG32(CP_ME_RAM_WADDR, 0); 976 for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++) 977 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 978 979 WREG32(CP_PFP_UCODE_ADDR, 0); 980 WREG32(CP_ME_RAM_WADDR, 0); 981 WREG32(CP_ME_RAM_RADDR, 0); 982 return 0; 983 } 984 985 static int cayman_cp_start(struct radeon_device *rdev) 986 { 987 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 988 int r, i; 989 990 r = radeon_ring_lock(rdev, ring, 7); 991 if (r) { 992 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 993 return r; 994 } 995 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 996 radeon_ring_write(ring, 0x1); 997 radeon_ring_write(ring, 0x0); 998 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1); 999 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1000 radeon_ring_write(ring, 0); 1001 radeon_ring_write(ring, 0); 1002 radeon_ring_unlock_commit(rdev, ring); 1003 1004 cayman_cp_enable(rdev, true); 1005 1006 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19); 1007 if (r) { 1008 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1009 return r; 1010 } 1011 1012 /* setup clear context state */ 1013 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1014 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 1015 1016 for (i = 0; i < cayman_default_size; i++) 1017 radeon_ring_write(ring, cayman_default_state[i]); 1018 1019 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1020 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 1021 1022 /* set clear context state */ 1023 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 1024 radeon_ring_write(ring, 0); 1025 1026 /* SQ_VTX_BASE_VTX_LOC */ 1027 radeon_ring_write(ring, 0xc0026f00); 1028 radeon_ring_write(ring, 0x00000000); 1029 radeon_ring_write(ring, 0x00000000); 1030 radeon_ring_write(ring, 0x00000000); 1031 1032 /* Clear consts */ 1033 radeon_ring_write(ring, 0xc0036f00); 1034 radeon_ring_write(ring, 0x00000bc4); 1035 radeon_ring_write(ring, 0xffffffff); 1036 radeon_ring_write(ring, 0xffffffff); 1037 radeon_ring_write(ring, 0xffffffff); 1038 1039 radeon_ring_write(ring, 0xc0026900); 1040 radeon_ring_write(ring, 0x00000316); 1041 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1042 radeon_ring_write(ring, 0x00000010); /* */ 1043 1044 radeon_ring_unlock_commit(rdev, ring); 1045 1046 /* XXX init other rings */ 1047 1048 return 0; 1049 } 1050 1051 static void cayman_cp_fini(struct radeon_device *rdev) 1052 { 1053 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1054 cayman_cp_enable(rdev, false); 1055 radeon_ring_fini(rdev, ring); 1056 radeon_scratch_free(rdev, ring->rptr_save_reg); 1057 } 1058 1059 static int cayman_cp_resume(struct radeon_device *rdev) 1060 { 1061 static const int ridx[] = { 1062 RADEON_RING_TYPE_GFX_INDEX, 1063 CAYMAN_RING_TYPE_CP1_INDEX, 1064 CAYMAN_RING_TYPE_CP2_INDEX 1065 }; 1066 static const unsigned cp_rb_cntl[] = { 1067 CP_RB0_CNTL, 1068 CP_RB1_CNTL, 1069 CP_RB2_CNTL, 1070 }; 1071 static const unsigned cp_rb_rptr_addr[] = { 1072 CP_RB0_RPTR_ADDR, 1073 CP_RB1_RPTR_ADDR, 1074 CP_RB2_RPTR_ADDR 1075 }; 1076 static const unsigned cp_rb_rptr_addr_hi[] = { 1077 CP_RB0_RPTR_ADDR_HI, 1078 CP_RB1_RPTR_ADDR_HI, 1079 CP_RB2_RPTR_ADDR_HI 1080 }; 1081 static const unsigned cp_rb_base[] = { 1082 CP_RB0_BASE, 1083 CP_RB1_BASE, 1084 CP_RB2_BASE 1085 }; 1086 struct radeon_ring *ring; 1087 int i, r; 1088 1089 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ 1090 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | 1091 SOFT_RESET_PA | 1092 SOFT_RESET_SH | 1093 SOFT_RESET_VGT | 1094 SOFT_RESET_SPI | 1095 SOFT_RESET_SX)); 1096 RREG32(GRBM_SOFT_RESET); 1097 DRM_MDELAY(15); 1098 WREG32(GRBM_SOFT_RESET, 0); 1099 RREG32(GRBM_SOFT_RESET); 1100 1101 WREG32(CP_SEM_WAIT_TIMER, 0x0); 1102 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 1103 1104 /* Set the write pointer delay */ 1105 WREG32(CP_RB_WPTR_DELAY, 0); 1106 1107 WREG32(CP_DEBUG, (1 << 27)); 1108 1109 /* set the wb address whether it's enabled or not */ 1110 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1111 WREG32(SCRATCH_UMSK, 0xff); 1112 1113 for (i = 0; i < 3; ++i) { 1114 uint32_t rb_cntl; 1115 uint64_t addr; 1116 1117 /* Set ring buffer size */ 1118 ring = &rdev->ring[ridx[i]]; 1119 rb_cntl = drm_order(ring->ring_size / 8); 1120 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; 1121 #ifdef __BIG_ENDIAN 1122 rb_cntl |= BUF_SWAP_32BIT; 1123 #endif 1124 WREG32(cp_rb_cntl[i], rb_cntl); 1125 1126 /* set the wb address whether it's enabled or not */ 1127 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET; 1128 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC); 1129 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF); 1130 } 1131 1132 /* set the rb base addr, this causes an internal reset of ALL rings */ 1133 for (i = 0; i < 3; ++i) { 1134 ring = &rdev->ring[ridx[i]]; 1135 WREG32(cp_rb_base[i], ring->gpu_addr >> 8); 1136 } 1137 1138 for (i = 0; i < 3; ++i) { 1139 /* Initialize the ring buffer's read and write pointers */ 1140 ring = &rdev->ring[ridx[i]]; 1141 WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); 1142 1143 ring->rptr = ring->wptr = 0; 1144 WREG32(ring->rptr_reg, ring->rptr); 1145 WREG32(ring->wptr_reg, ring->wptr); 1146 1147 DRM_MDELAY(1); 1148 WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); 1149 } 1150 1151 /* start the rings */ 1152 cayman_cp_start(rdev); 1153 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; 1154 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 1155 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 1156 /* this only test cp0 */ 1157 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1158 if (r) { 1159 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1160 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 1161 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 1162 return r; 1163 } 1164 1165 return 0; 1166 } 1167 1168 /* 1169 * DMA 1170 * Starting with R600, the GPU has an asynchronous 1171 * DMA engine. The programming model is very similar 1172 * to the 3D engine (ring buffer, IBs, etc.), but the 1173 * DMA controller has it's own packet format that is 1174 * different form the PM4 format used by the 3D engine. 1175 * It supports copying data, writing embedded data, 1176 * solid fills, and a number of other things. It also 1177 * has support for tiling/detiling of buffers. 1178 * Cayman and newer support two asynchronous DMA engines. 1179 */ 1180 /** 1181 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine 1182 * 1183 * @rdev: radeon_device pointer 1184 * @ib: IB object to schedule 1185 * 1186 * Schedule an IB in the DMA ring (cayman-SI). 1187 */ 1188 void cayman_dma_ring_ib_execute(struct radeon_device *rdev, 1189 struct radeon_ib *ib) 1190 { 1191 struct radeon_ring *ring = &rdev->ring[ib->ring]; 1192 1193 if (rdev->wb.enabled) { 1194 u32 next_rptr = ring->wptr + 4; 1195 while ((next_rptr & 7) != 5) 1196 next_rptr++; 1197 next_rptr += 3; 1198 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 1199 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 1200 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); 1201 radeon_ring_write(ring, next_rptr); 1202 } 1203 1204 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. 1205 * Pad as necessary with NOPs. 1206 */ 1207 while ((ring->wptr & 7) != 5) 1208 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1209 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); 1210 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 1211 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 1212 1213 } 1214 1215 /** 1216 * cayman_dma_stop - stop the async dma engines 1217 * 1218 * @rdev: radeon_device pointer 1219 * 1220 * Stop the async dma engines (cayman-SI). 1221 */ 1222 void cayman_dma_stop(struct radeon_device *rdev) 1223 { 1224 u32 rb_cntl; 1225 1226 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1227 1228 /* dma0 */ 1229 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); 1230 rb_cntl &= ~DMA_RB_ENABLE; 1231 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); 1232 1233 /* dma1 */ 1234 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); 1235 rb_cntl &= ~DMA_RB_ENABLE; 1236 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); 1237 1238 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; 1239 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; 1240 } 1241 1242 /** 1243 * cayman_dma_resume - setup and start the async dma engines 1244 * 1245 * @rdev: radeon_device pointer 1246 * 1247 * Set up the DMA ring buffers and enable them. (cayman-SI). 1248 * Returns 0 for success, error for failure. 1249 */ 1250 int cayman_dma_resume(struct radeon_device *rdev) 1251 { 1252 struct radeon_ring *ring; 1253 u32 rb_cntl, dma_cntl, ib_cntl; 1254 u32 rb_bufsz; 1255 u32 reg_offset, wb_offset; 1256 int i, r; 1257 1258 /* Reset dma */ 1259 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); 1260 RREG32(SRBM_SOFT_RESET); 1261 DRM_UDELAY(50); 1262 WREG32(SRBM_SOFT_RESET, 0); 1263 1264 for (i = 0; i < 2; i++) { 1265 if (i == 0) { 1266 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1267 reg_offset = DMA0_REGISTER_OFFSET; 1268 wb_offset = R600_WB_DMA_RPTR_OFFSET; 1269 } else { 1270 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1271 reg_offset = DMA1_REGISTER_OFFSET; 1272 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; 1273 } 1274 1275 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); 1276 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); 1277 1278 /* Set ring buffer size in dwords */ 1279 rb_bufsz = drm_order(ring->ring_size / 4); 1280 rb_cntl = rb_bufsz << 1; 1281 #ifdef __BIG_ENDIAN 1282 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 1283 #endif 1284 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); 1285 1286 /* Initialize the ring buffer's read and write pointers */ 1287 WREG32(DMA_RB_RPTR + reg_offset, 0); 1288 WREG32(DMA_RB_WPTR + reg_offset, 0); 1289 1290 /* set the wb address whether it's enabled or not */ 1291 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, 1292 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); 1293 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, 1294 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); 1295 1296 if (rdev->wb.enabled) 1297 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; 1298 1299 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); 1300 1301 /* enable DMA IBs */ 1302 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; 1303 #ifdef __BIG_ENDIAN 1304 ib_cntl |= DMA_IB_SWAP_ENABLE; 1305 #endif 1306 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); 1307 1308 dma_cntl = RREG32(DMA_CNTL + reg_offset); 1309 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 1310 WREG32(DMA_CNTL + reg_offset, dma_cntl); 1311 1312 ring->wptr = 0; 1313 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); 1314 1315 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; 1316 1317 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); 1318 1319 ring->ready = true; 1320 1321 r = radeon_ring_test(rdev, ring->idx, ring); 1322 if (r) { 1323 ring->ready = false; 1324 return r; 1325 } 1326 } 1327 1328 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1329 1330 return 0; 1331 } 1332 1333 /** 1334 * cayman_dma_fini - tear down the async dma engines 1335 * 1336 * @rdev: radeon_device pointer 1337 * 1338 * Stop the async dma engines and free the rings (cayman-SI). 1339 */ 1340 void cayman_dma_fini(struct radeon_device *rdev) 1341 { 1342 cayman_dma_stop(rdev); 1343 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); 1344 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); 1345 } 1346 1347 static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) 1348 { 1349 u32 reset_mask = 0; 1350 u32 tmp; 1351 1352 /* GRBM_STATUS */ 1353 tmp = RREG32(GRBM_STATUS); 1354 if (tmp & (PA_BUSY | SC_BUSY | 1355 SH_BUSY | SX_BUSY | 1356 TA_BUSY | VGT_BUSY | 1357 DB_BUSY | CB_BUSY | 1358 GDS_BUSY | SPI_BUSY | 1359 IA_BUSY | IA_BUSY_NO_DMA)) 1360 reset_mask |= RADEON_RESET_GFX; 1361 1362 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING | 1363 CP_BUSY | CP_COHERENCY_BUSY)) 1364 reset_mask |= RADEON_RESET_CP; 1365 1366 if (tmp & GRBM_EE_BUSY) 1367 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP; 1368 1369 /* DMA_STATUS_REG 0 */ 1370 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET); 1371 if (!(tmp & DMA_IDLE)) 1372 reset_mask |= RADEON_RESET_DMA; 1373 1374 /* DMA_STATUS_REG 1 */ 1375 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET); 1376 if (!(tmp & DMA_IDLE)) 1377 reset_mask |= RADEON_RESET_DMA1; 1378 1379 /* SRBM_STATUS2 */ 1380 tmp = RREG32(SRBM_STATUS2); 1381 if (tmp & DMA_BUSY) 1382 reset_mask |= RADEON_RESET_DMA; 1383 1384 if (tmp & DMA1_BUSY) 1385 reset_mask |= RADEON_RESET_DMA1; 1386 1387 /* SRBM_STATUS */ 1388 tmp = RREG32(SRBM_STATUS); 1389 if (tmp & (RLC_RQ_PENDING | RLC_BUSY)) 1390 reset_mask |= RADEON_RESET_RLC; 1391 1392 if (tmp & IH_BUSY) 1393 reset_mask |= RADEON_RESET_IH; 1394 1395 if (tmp & SEM_BUSY) 1396 reset_mask |= RADEON_RESET_SEM; 1397 1398 if (tmp & GRBM_RQ_PENDING) 1399 reset_mask |= RADEON_RESET_GRBM; 1400 1401 if (tmp & VMC_BUSY) 1402 reset_mask |= RADEON_RESET_VMC; 1403 1404 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY | 1405 MCC_BUSY | MCD_BUSY)) 1406 reset_mask |= RADEON_RESET_MC; 1407 1408 if (evergreen_is_display_hung(rdev)) 1409 reset_mask |= RADEON_RESET_DISPLAY; 1410 1411 /* VM_L2_STATUS */ 1412 tmp = RREG32(VM_L2_STATUS); 1413 if (tmp & L2_BUSY) 1414 reset_mask |= RADEON_RESET_VMC; 1415 1416 /* Skip MC reset as it's mostly likely not hung, just busy */ 1417 if (reset_mask & RADEON_RESET_MC) { 1418 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); 1419 reset_mask &= ~RADEON_RESET_MC; 1420 } 1421 1422 return reset_mask; 1423 } 1424 1425 static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1426 { 1427 struct evergreen_mc_save save; 1428 u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 1429 u32 tmp; 1430 1431 if (reset_mask == 0) 1432 return; 1433 1434 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1435 1436 evergreen_print_gpu_status_regs(rdev); 1437 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", 1438 RREG32(0x14F8)); 1439 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", 1440 RREG32(0x14D8)); 1441 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1442 RREG32(0x14FC)); 1443 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1444 RREG32(0x14DC)); 1445 1446 /* Disable CP parsing/prefetching */ 1447 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 1448 1449 if (reset_mask & RADEON_RESET_DMA) { 1450 /* dma0 */ 1451 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); 1452 tmp &= ~DMA_RB_ENABLE; 1453 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); 1454 } 1455 1456 if (reset_mask & RADEON_RESET_DMA1) { 1457 /* dma1 */ 1458 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); 1459 tmp &= ~DMA_RB_ENABLE; 1460 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); 1461 } 1462 1463 DRM_UDELAY(50); 1464 1465 evergreen_mc_stop(rdev, &save); 1466 if (evergreen_mc_wait_for_idle(rdev)) { 1467 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1468 } 1469 1470 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) { 1471 grbm_soft_reset = SOFT_RESET_CB | 1472 SOFT_RESET_DB | 1473 SOFT_RESET_GDS | 1474 SOFT_RESET_PA | 1475 SOFT_RESET_SC | 1476 SOFT_RESET_SPI | 1477 SOFT_RESET_SH | 1478 SOFT_RESET_SX | 1479 SOFT_RESET_TC | 1480 SOFT_RESET_TA | 1481 SOFT_RESET_VGT | 1482 SOFT_RESET_IA; 1483 } 1484 1485 if (reset_mask & RADEON_RESET_CP) { 1486 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT; 1487 1488 srbm_soft_reset |= SOFT_RESET_GRBM; 1489 } 1490 1491 if (reset_mask & RADEON_RESET_DMA) 1492 srbm_soft_reset |= SOFT_RESET_DMA; 1493 1494 if (reset_mask & RADEON_RESET_DMA1) 1495 srbm_soft_reset |= SOFT_RESET_DMA1; 1496 1497 if (reset_mask & RADEON_RESET_DISPLAY) 1498 srbm_soft_reset |= SOFT_RESET_DC; 1499 1500 if (reset_mask & RADEON_RESET_RLC) 1501 srbm_soft_reset |= SOFT_RESET_RLC; 1502 1503 if (reset_mask & RADEON_RESET_SEM) 1504 srbm_soft_reset |= SOFT_RESET_SEM; 1505 1506 if (reset_mask & RADEON_RESET_IH) 1507 srbm_soft_reset |= SOFT_RESET_IH; 1508 1509 if (reset_mask & RADEON_RESET_GRBM) 1510 srbm_soft_reset |= SOFT_RESET_GRBM; 1511 1512 if (reset_mask & RADEON_RESET_VMC) 1513 srbm_soft_reset |= SOFT_RESET_VMC; 1514 1515 if (!(rdev->flags & RADEON_IS_IGP)) { 1516 if (reset_mask & RADEON_RESET_MC) 1517 srbm_soft_reset |= SOFT_RESET_MC; 1518 } 1519 1520 if (grbm_soft_reset) { 1521 tmp = RREG32(GRBM_SOFT_RESET); 1522 tmp |= grbm_soft_reset; 1523 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 1524 WREG32(GRBM_SOFT_RESET, tmp); 1525 tmp = RREG32(GRBM_SOFT_RESET); 1526 1527 DRM_UDELAY(50); 1528 1529 tmp &= ~grbm_soft_reset; 1530 WREG32(GRBM_SOFT_RESET, tmp); 1531 tmp = RREG32(GRBM_SOFT_RESET); 1532 } 1533 1534 if (srbm_soft_reset) { 1535 tmp = RREG32(SRBM_SOFT_RESET); 1536 tmp |= srbm_soft_reset; 1537 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1538 WREG32(SRBM_SOFT_RESET, tmp); 1539 tmp = RREG32(SRBM_SOFT_RESET); 1540 1541 DRM_UDELAY(50); 1542 1543 tmp &= ~srbm_soft_reset; 1544 WREG32(SRBM_SOFT_RESET, tmp); 1545 tmp = RREG32(SRBM_SOFT_RESET); 1546 } 1547 1548 /* Wait a little for things to settle down */ 1549 DRM_UDELAY(50); 1550 1551 evergreen_mc_resume(rdev, &save); 1552 DRM_UDELAY(50); 1553 1554 evergreen_print_gpu_status_regs(rdev); 1555 } 1556 1557 int cayman_asic_reset(struct radeon_device *rdev) 1558 { 1559 u32 reset_mask; 1560 1561 reset_mask = cayman_gpu_check_soft_reset(rdev); 1562 1563 if (reset_mask) 1564 r600_set_bios_scratch_engine_hung(rdev, true); 1565 1566 cayman_gpu_soft_reset(rdev, reset_mask); 1567 1568 reset_mask = cayman_gpu_check_soft_reset(rdev); 1569 1570 if (!reset_mask) 1571 r600_set_bios_scratch_engine_hung(rdev, false); 1572 1573 return 0; 1574 } 1575 1576 /** 1577 * cayman_gfx_is_lockup - Check if the GFX engine is locked up 1578 * 1579 * @rdev: radeon_device pointer 1580 * @ring: radeon_ring structure holding ring information 1581 * 1582 * Check if the GFX engine is locked up. 1583 * Returns true if the engine appears to be locked up, false if not. 1584 */ 1585 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1586 { 1587 u32 reset_mask = cayman_gpu_check_soft_reset(rdev); 1588 1589 if (!(reset_mask & (RADEON_RESET_GFX | 1590 RADEON_RESET_COMPUTE | 1591 RADEON_RESET_CP))) { 1592 radeon_ring_lockup_update(ring); 1593 return false; 1594 } 1595 /* force CP activities */ 1596 radeon_ring_force_activity(rdev, ring); 1597 return radeon_ring_test_lockup(rdev, ring); 1598 } 1599 1600 /** 1601 * cayman_dma_is_lockup - Check if the DMA engine is locked up 1602 * 1603 * @rdev: radeon_device pointer 1604 * @ring: radeon_ring structure holding ring information 1605 * 1606 * Check if the async DMA engine is locked up. 1607 * Returns true if the engine appears to be locked up, false if not. 1608 */ 1609 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1610 { 1611 u32 reset_mask = cayman_gpu_check_soft_reset(rdev); 1612 u32 mask; 1613 1614 if (ring->idx == R600_RING_TYPE_DMA_INDEX) 1615 mask = RADEON_RESET_DMA; 1616 else 1617 mask = RADEON_RESET_DMA1; 1618 1619 if (!(reset_mask & mask)) { 1620 radeon_ring_lockup_update(ring); 1621 return false; 1622 } 1623 /* force ring activities */ 1624 radeon_ring_force_activity(rdev, ring); 1625 return radeon_ring_test_lockup(rdev, ring); 1626 } 1627 1628 static int cayman_startup(struct radeon_device *rdev) 1629 { 1630 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1631 int r; 1632 1633 /* enable pcie gen2 link */ 1634 evergreen_pcie_gen2_enable(rdev); 1635 1636 if (rdev->flags & RADEON_IS_IGP) { 1637 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1638 r = ni_init_microcode(rdev); 1639 if (r) { 1640 DRM_ERROR("Failed to load firmware!\n"); 1641 return r; 1642 } 1643 } 1644 } else { 1645 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 1646 r = ni_init_microcode(rdev); 1647 if (r) { 1648 DRM_ERROR("Failed to load firmware!\n"); 1649 return r; 1650 } 1651 } 1652 1653 r = ni_mc_load_microcode(rdev); 1654 if (r) { 1655 DRM_ERROR("Failed to load MC firmware!\n"); 1656 return r; 1657 } 1658 } 1659 1660 r = r600_vram_scratch_init(rdev); 1661 if (r) 1662 return r; 1663 1664 evergreen_mc_program(rdev); 1665 r = cayman_pcie_gart_enable(rdev); 1666 if (r) 1667 return r; 1668 cayman_gpu_init(rdev); 1669 1670 r = evergreen_blit_init(rdev); 1671 if (r) { 1672 r600_blit_fini(rdev); 1673 rdev->asic->copy.copy = NULL; 1674 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1675 } 1676 1677 /* allocate rlc buffers */ 1678 if (rdev->flags & RADEON_IS_IGP) { 1679 r = si_rlc_init(rdev); 1680 if (r) { 1681 DRM_ERROR("Failed to init rlc BOs!\n"); 1682 return r; 1683 } 1684 } 1685 1686 /* allocate wb buffer */ 1687 r = radeon_wb_init(rdev); 1688 if (r) 1689 return r; 1690 1691 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 1692 if (r) { 1693 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1694 return r; 1695 } 1696 1697 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); 1698 if (r) { 1699 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1700 return r; 1701 } 1702 1703 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX); 1704 if (r) { 1705 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1706 return r; 1707 } 1708 1709 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); 1710 if (r) { 1711 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 1712 return r; 1713 } 1714 1715 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); 1716 if (r) { 1717 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 1718 return r; 1719 } 1720 1721 /* Enable IRQ */ 1722 r = r600_irq_init(rdev); 1723 if (r) { 1724 DRM_ERROR("radeon: IH init failed (%d).\n", r); 1725 radeon_irq_kms_fini(rdev); 1726 return r; 1727 } 1728 evergreen_irq_set(rdev); 1729 1730 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 1731 CP_RB0_RPTR, CP_RB0_WPTR, 1732 0, 0xfffff, RADEON_CP_PACKET2); 1733 if (r) 1734 return r; 1735 1736 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1737 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 1738 DMA_RB_RPTR + DMA0_REGISTER_OFFSET, 1739 DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 1740 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1741 if (r) 1742 return r; 1743 1744 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1745 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, 1746 DMA_RB_RPTR + DMA1_REGISTER_OFFSET, 1747 DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 1748 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1749 if (r) 1750 return r; 1751 1752 r = cayman_cp_load_microcode(rdev); 1753 if (r) 1754 return r; 1755 r = cayman_cp_resume(rdev); 1756 if (r) 1757 return r; 1758 1759 r = cayman_dma_resume(rdev); 1760 if (r) 1761 return r; 1762 1763 r = radeon_ib_pool_init(rdev); 1764 if (r) { 1765 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1766 return r; 1767 } 1768 1769 r = radeon_vm_manager_init(rdev); 1770 if (r) { 1771 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); 1772 return r; 1773 } 1774 1775 r = r600_audio_init(rdev); 1776 if (r) 1777 return r; 1778 1779 return 0; 1780 } 1781 1782 int cayman_resume(struct radeon_device *rdev) 1783 { 1784 int r; 1785 1786 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 1787 * posting will perform necessary task to bring back GPU into good 1788 * shape. 1789 */ 1790 /* post card */ 1791 atom_asic_init(rdev->mode_info.atom_context); 1792 1793 rdev->accel_working = true; 1794 r = cayman_startup(rdev); 1795 if (r) { 1796 DRM_ERROR("cayman startup failed on resume\n"); 1797 rdev->accel_working = false; 1798 return r; 1799 } 1800 return r; 1801 } 1802 1803 int cayman_suspend(struct radeon_device *rdev) 1804 { 1805 r600_audio_fini(rdev); 1806 radeon_vm_manager_fini(rdev); 1807 cayman_cp_enable(rdev, false); 1808 cayman_dma_stop(rdev); 1809 evergreen_irq_suspend(rdev); 1810 radeon_wb_disable(rdev); 1811 cayman_pcie_gart_disable(rdev); 1812 return 0; 1813 } 1814 1815 /* Plan is to move initialization in that function and use 1816 * helper function so that radeon_device_init pretty much 1817 * do nothing more than calling asic specific function. This 1818 * should also allow to remove a bunch of callback function 1819 * like vram_info. 1820 */ 1821 int cayman_init(struct radeon_device *rdev) 1822 { 1823 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1824 int r; 1825 1826 /* Read BIOS */ 1827 if (!radeon_get_bios(rdev)) { 1828 if (ASIC_IS_AVIVO(rdev)) 1829 return -EINVAL; 1830 } 1831 /* Must be an ATOMBIOS */ 1832 if (!rdev->is_atom_bios) { 1833 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n"); 1834 return -EINVAL; 1835 } 1836 r = radeon_atombios_init(rdev); 1837 if (r) 1838 return r; 1839 1840 /* Post card if necessary */ 1841 if (!radeon_card_posted(rdev)) { 1842 if (!rdev->bios) { 1843 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 1844 return -EINVAL; 1845 } 1846 DRM_INFO("GPU not posted. posting now...\n"); 1847 atom_asic_init(rdev->mode_info.atom_context); 1848 } 1849 /* Initialize scratch registers */ 1850 r600_scratch_init(rdev); 1851 /* Initialize surface registers */ 1852 radeon_surface_init(rdev); 1853 /* Initialize clocks */ 1854 radeon_get_clock_info(rdev->ddev); 1855 /* Fence driver */ 1856 r = radeon_fence_driver_init(rdev); 1857 if (r) 1858 return r; 1859 /* initialize memory controller */ 1860 r = evergreen_mc_init(rdev); 1861 if (r) 1862 return r; 1863 /* Memory manager */ 1864 r = radeon_bo_init(rdev); 1865 if (r) 1866 return r; 1867 1868 r = radeon_irq_kms_init(rdev); 1869 if (r) 1870 return r; 1871 1872 ring->ring_obj = NULL; 1873 r600_ring_init(rdev, ring, 1024 * 1024); 1874 1875 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1876 ring->ring_obj = NULL; 1877 r600_ring_init(rdev, ring, 64 * 1024); 1878 1879 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1880 ring->ring_obj = NULL; 1881 r600_ring_init(rdev, ring, 64 * 1024); 1882 1883 rdev->ih.ring_obj = NULL; 1884 r600_ih_ring_init(rdev, 64 * 1024); 1885 1886 r = r600_pcie_gart_init(rdev); 1887 if (r) 1888 return r; 1889 1890 rdev->accel_working = true; 1891 r = cayman_startup(rdev); 1892 if (r) { 1893 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1894 cayman_cp_fini(rdev); 1895 cayman_dma_fini(rdev); 1896 r600_irq_fini(rdev); 1897 if (rdev->flags & RADEON_IS_IGP) 1898 si_rlc_fini(rdev); 1899 radeon_wb_fini(rdev); 1900 radeon_ib_pool_fini(rdev); 1901 radeon_vm_manager_fini(rdev); 1902 radeon_irq_kms_fini(rdev); 1903 cayman_pcie_gart_fini(rdev); 1904 rdev->accel_working = false; 1905 } 1906 1907 /* Don't start up if the MC ucode is missing. 1908 * The default clocks and voltages before the MC ucode 1909 * is loaded are not suffient for advanced operations. 1910 * 1911 * We can skip this check for TN, because there is no MC 1912 * ucode. 1913 */ 1914 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { 1915 DRM_ERROR("radeon: MC ucode required for NI+.\n"); 1916 return -EINVAL; 1917 } 1918 1919 return 0; 1920 } 1921 1922 void cayman_fini(struct radeon_device *rdev) 1923 { 1924 r600_blit_fini(rdev); 1925 cayman_cp_fini(rdev); 1926 cayman_dma_fini(rdev); 1927 r600_irq_fini(rdev); 1928 if (rdev->flags & RADEON_IS_IGP) 1929 si_rlc_fini(rdev); 1930 radeon_wb_fini(rdev); 1931 radeon_vm_manager_fini(rdev); 1932 radeon_ib_pool_fini(rdev); 1933 radeon_irq_kms_fini(rdev); 1934 cayman_pcie_gart_fini(rdev); 1935 r600_vram_scratch_fini(rdev); 1936 radeon_gem_fini(rdev); 1937 radeon_fence_driver_fini(rdev); 1938 radeon_bo_fini(rdev); 1939 radeon_atombios_fini(rdev); 1940 ni_fini_microcode(rdev); 1941 drm_free(rdev->bios, M_DRM); 1942 rdev->bios = NULL; 1943 } 1944 1945 /* 1946 * vm 1947 */ 1948 int cayman_vm_init(struct radeon_device *rdev) 1949 { 1950 /* number of VMs */ 1951 rdev->vm_manager.nvm = 8; 1952 /* base offset of vram pages */ 1953 if (rdev->flags & RADEON_IS_IGP) { 1954 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET); 1955 tmp <<= 22; 1956 rdev->vm_manager.vram_base_offset = tmp; 1957 } else 1958 rdev->vm_manager.vram_base_offset = 0; 1959 return 0; 1960 } 1961 1962 void cayman_vm_fini(struct radeon_device *rdev) 1963 { 1964 } 1965 1966 #define R600_ENTRY_VALID (1 << 0) 1967 #define R600_PTE_SYSTEM (1 << 1) 1968 #define R600_PTE_SNOOPED (1 << 2) 1969 #define R600_PTE_READABLE (1 << 5) 1970 #define R600_PTE_WRITEABLE (1 << 6) 1971 1972 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) 1973 { 1974 uint32_t r600_flags = 0; 1975 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0; 1976 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; 1977 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; 1978 if (flags & RADEON_VM_PAGE_SYSTEM) { 1979 r600_flags |= R600_PTE_SYSTEM; 1980 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; 1981 } 1982 return r600_flags; 1983 } 1984 1985 /** 1986 * cayman_vm_set_page - update the page tables using the CP 1987 * 1988 * @rdev: radeon_device pointer 1989 * @ib: indirect buffer to fill with commands 1990 * @pe: addr of the page entry 1991 * @addr: dst addr to write into pe 1992 * @count: number of page entries to update 1993 * @incr: increase next addr by incr bytes 1994 * @flags: access flags 1995 * 1996 * Update the page tables using the CP (cayman/TN). 1997 */ 1998 void cayman_vm_set_page(struct radeon_device *rdev, 1999 struct radeon_ib *ib, 2000 uint64_t pe, 2001 uint64_t addr, unsigned count, 2002 uint32_t incr, uint32_t flags) 2003 { 2004 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2005 uint64_t value; 2006 unsigned ndw; 2007 2008 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { 2009 while (count) { 2010 ndw = 1 + count * 2; 2011 if (ndw > 0x3FFF) 2012 ndw = 0x3FFF; 2013 2014 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw); 2015 ib->ptr[ib->length_dw++] = pe; 2016 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 2017 for (; ndw > 1; ndw -= 2, --count, pe += 8) { 2018 if (flags & RADEON_VM_PAGE_SYSTEM) { 2019 value = radeon_vm_map_gart(rdev, addr); 2020 value &= 0xFFFFFFFFFFFFF000ULL; 2021 } else if (flags & RADEON_VM_PAGE_VALID) { 2022 value = addr; 2023 } else { 2024 value = 0; 2025 } 2026 addr += incr; 2027 value |= r600_flags; 2028 ib->ptr[ib->length_dw++] = value; 2029 ib->ptr[ib->length_dw++] = upper_32_bits(value); 2030 } 2031 } 2032 } else { 2033 while (count) { 2034 ndw = count * 2; 2035 if (ndw > 0xFFFFE) 2036 ndw = 0xFFFFE; 2037 2038 /* for non-physically contiguous pages (system) */ 2039 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); 2040 ib->ptr[ib->length_dw++] = pe; 2041 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 2042 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 2043 if (flags & RADEON_VM_PAGE_SYSTEM) { 2044 value = radeon_vm_map_gart(rdev, addr); 2045 value &= 0xFFFFFFFFFFFFF000ULL; 2046 } else if (flags & RADEON_VM_PAGE_VALID) { 2047 value = addr; 2048 } else { 2049 value = 0; 2050 } 2051 addr += incr; 2052 value |= r600_flags; 2053 ib->ptr[ib->length_dw++] = value; 2054 ib->ptr[ib->length_dw++] = upper_32_bits(value); 2055 } 2056 } 2057 while (ib->length_dw & 0x7) 2058 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); 2059 } 2060 } 2061 2062 /** 2063 * cayman_vm_flush - vm flush using the CP 2064 * 2065 * @rdev: radeon_device pointer 2066 * 2067 * Update the page table base and flush the VM TLB 2068 * using the CP (cayman-si). 2069 */ 2070 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 2071 { 2072 struct radeon_ring *ring = &rdev->ring[ridx]; 2073 2074 if (vm == NULL) 2075 return; 2076 2077 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); 2078 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 2079 2080 /* flush hdp cache */ 2081 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); 2082 radeon_ring_write(ring, 0x1); 2083 2084 /* bits 0-7 are the VM contexts0-7 */ 2085 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); 2086 radeon_ring_write(ring, 1 << vm->id); 2087 2088 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 2089 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2090 radeon_ring_write(ring, 0x0); 2091 } 2092 2093 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 2094 { 2095 struct radeon_ring *ring = &rdev->ring[ridx]; 2096 2097 if (vm == NULL) 2098 return; 2099 2100 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 2101 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 2102 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 2103 2104 /* flush hdp cache */ 2105 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 2106 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); 2107 radeon_ring_write(ring, 1); 2108 2109 /* bits 0-7 are the VM contexts0-7 */ 2110 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 2111 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); 2112 radeon_ring_write(ring, 1 << vm->id); 2113 } 2114 2115