1 /* 2 * Copyright 2010 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 * $FreeBSD: head/sys/dev/drm2/radeon/ni.c 254885 2013-08-25 19:37:15Z dumbbell $ 24 */ 25 26 #include <drm/drmP.h> 27 #include "radeon.h" 28 #include "radeon_asic.h" 29 #include <uapi_drm/radeon_drm.h> 30 #include "nid.h" 31 #include "atom.h" 32 #include "ni_reg.h" 33 #include "cayman_blit_shaders.h" 34 35 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 36 37 #define EVERGREEN_PFP_UCODE_SIZE 1120 38 #define EVERGREEN_PM4_UCODE_SIZE 1376 39 #define EVERGREEN_RLC_UCODE_SIZE 768 40 #define BTC_MC_UCODE_SIZE 6024 41 42 #define CAYMAN_PFP_UCODE_SIZE 2176 43 #define CAYMAN_PM4_UCODE_SIZE 2176 44 #define CAYMAN_RLC_UCODE_SIZE 1024 45 #define CAYMAN_MC_UCODE_SIZE 6037 46 47 #define ARUBA_RLC_UCODE_SIZE 1536 48 49 #define BTC_IO_MC_REGS_SIZE 29 50 51 static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 52 {0x00000077, 0xff010100}, 53 {0x00000078, 0x00000000}, 54 {0x00000079, 0x00001434}, 55 {0x0000007a, 0xcc08ec08}, 56 {0x0000007b, 0x00040000}, 57 {0x0000007c, 0x000080c0}, 58 {0x0000007d, 0x09000000}, 59 {0x0000007e, 0x00210404}, 60 {0x00000081, 0x08a8e800}, 61 {0x00000082, 0x00030444}, 62 {0x00000083, 0x00000000}, 63 {0x00000085, 0x00000001}, 64 {0x00000086, 0x00000002}, 65 {0x00000087, 0x48490000}, 66 {0x00000088, 0x20244647}, 67 {0x00000089, 0x00000005}, 68 {0x0000008b, 0x66030000}, 69 {0x0000008c, 0x00006603}, 70 {0x0000008d, 0x00000100}, 71 {0x0000008f, 0x00001c0a}, 72 {0x00000090, 0xff000001}, 73 {0x00000094, 0x00101101}, 74 {0x00000095, 0x00000fff}, 75 {0x00000096, 0x00116fff}, 76 {0x00000097, 0x60010000}, 77 {0x00000098, 0x10010000}, 78 {0x00000099, 0x00006000}, 79 {0x0000009a, 0x00001000}, 80 {0x0000009f, 0x00946a00} 81 }; 82 83 static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 84 {0x00000077, 0xff010100}, 85 {0x00000078, 0x00000000}, 86 {0x00000079, 0x00001434}, 87 {0x0000007a, 0xcc08ec08}, 88 {0x0000007b, 0x00040000}, 89 {0x0000007c, 0x000080c0}, 90 {0x0000007d, 0x09000000}, 91 {0x0000007e, 0x00210404}, 92 {0x00000081, 0x08a8e800}, 93 {0x00000082, 0x00030444}, 94 {0x00000083, 0x00000000}, 95 {0x00000085, 0x00000001}, 96 {0x00000086, 0x00000002}, 97 {0x00000087, 0x48490000}, 98 {0x00000088, 0x20244647}, 99 {0x00000089, 0x00000005}, 100 {0x0000008b, 0x66030000}, 101 {0x0000008c, 0x00006603}, 102 {0x0000008d, 0x00000100}, 103 {0x0000008f, 0x00001c0a}, 104 {0x00000090, 0xff000001}, 105 {0x00000094, 0x00101101}, 106 {0x00000095, 0x00000fff}, 107 {0x00000096, 0x00116fff}, 108 {0x00000097, 0x60010000}, 109 {0x00000098, 0x10010000}, 110 {0x00000099, 0x00006000}, 111 {0x0000009a, 0x00001000}, 112 {0x0000009f, 0x00936a00} 113 }; 114 115 static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 116 {0x00000077, 0xff010100}, 117 {0x00000078, 0x00000000}, 118 {0x00000079, 0x00001434}, 119 {0x0000007a, 0xcc08ec08}, 120 {0x0000007b, 0x00040000}, 121 {0x0000007c, 0x000080c0}, 122 {0x0000007d, 0x09000000}, 123 {0x0000007e, 0x00210404}, 124 {0x00000081, 0x08a8e800}, 125 {0x00000082, 0x00030444}, 126 {0x00000083, 0x00000000}, 127 {0x00000085, 0x00000001}, 128 {0x00000086, 0x00000002}, 129 {0x00000087, 0x48490000}, 130 {0x00000088, 0x20244647}, 131 {0x00000089, 0x00000005}, 132 {0x0000008b, 0x66030000}, 133 {0x0000008c, 0x00006603}, 134 {0x0000008d, 0x00000100}, 135 {0x0000008f, 0x00001c0a}, 136 {0x00000090, 0xff000001}, 137 {0x00000094, 0x00101101}, 138 {0x00000095, 0x00000fff}, 139 {0x00000096, 0x00116fff}, 140 {0x00000097, 0x60010000}, 141 {0x00000098, 0x10010000}, 142 {0x00000099, 0x00006000}, 143 {0x0000009a, 0x00001000}, 144 {0x0000009f, 0x00916a00} 145 }; 146 147 static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 148 {0x00000077, 0xff010100}, 149 {0x00000078, 0x00000000}, 150 {0x00000079, 0x00001434}, 151 {0x0000007a, 0xcc08ec08}, 152 {0x0000007b, 0x00040000}, 153 {0x0000007c, 0x000080c0}, 154 {0x0000007d, 0x09000000}, 155 {0x0000007e, 0x00210404}, 156 {0x00000081, 0x08a8e800}, 157 {0x00000082, 0x00030444}, 158 {0x00000083, 0x00000000}, 159 {0x00000085, 0x00000001}, 160 {0x00000086, 0x00000002}, 161 {0x00000087, 0x48490000}, 162 {0x00000088, 0x20244647}, 163 {0x00000089, 0x00000005}, 164 {0x0000008b, 0x66030000}, 165 {0x0000008c, 0x00006603}, 166 {0x0000008d, 0x00000100}, 167 {0x0000008f, 0x00001c0a}, 168 {0x00000090, 0xff000001}, 169 {0x00000094, 0x00101101}, 170 {0x00000095, 0x00000fff}, 171 {0x00000096, 0x00116fff}, 172 {0x00000097, 0x60010000}, 173 {0x00000098, 0x10010000}, 174 {0x00000099, 0x00006000}, 175 {0x0000009a, 0x00001000}, 176 {0x0000009f, 0x00976b00} 177 }; 178 179 int ni_mc_load_microcode(struct radeon_device *rdev) 180 { 181 const __be32 *fw_data; 182 u32 mem_type, running, blackout = 0; 183 u32 *io_mc_regs; 184 int i, ucode_size, regs_size; 185 186 if (!rdev->mc_fw) 187 return -EINVAL; 188 189 switch (rdev->family) { 190 case CHIP_BARTS: 191 io_mc_regs = (u32 *)&barts_io_mc_regs; 192 ucode_size = BTC_MC_UCODE_SIZE; 193 regs_size = BTC_IO_MC_REGS_SIZE; 194 break; 195 case CHIP_TURKS: 196 io_mc_regs = (u32 *)&turks_io_mc_regs; 197 ucode_size = BTC_MC_UCODE_SIZE; 198 regs_size = BTC_IO_MC_REGS_SIZE; 199 break; 200 case CHIP_CAICOS: 201 default: 202 io_mc_regs = (u32 *)&caicos_io_mc_regs; 203 ucode_size = BTC_MC_UCODE_SIZE; 204 regs_size = BTC_IO_MC_REGS_SIZE; 205 break; 206 case CHIP_CAYMAN: 207 io_mc_regs = (u32 *)&cayman_io_mc_regs; 208 ucode_size = CAYMAN_MC_UCODE_SIZE; 209 regs_size = BTC_IO_MC_REGS_SIZE; 210 break; 211 } 212 213 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT; 214 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 215 216 if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) { 217 if (running) { 218 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); 219 WREG32(MC_SHARED_BLACKOUT_CNTL, 1); 220 } 221 222 /* reset the engine and set to writable */ 223 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 224 WREG32(MC_SEQ_SUP_CNTL, 0x00000010); 225 226 /* load mc io regs */ 227 for (i = 0; i < regs_size; i++) { 228 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 229 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 230 } 231 /* load the MC ucode */ 232 fw_data = (const __be32 *)rdev->mc_fw->data; 233 for (i = 0; i < ucode_size; i++) 234 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); 235 236 /* put the engine back into the active state */ 237 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 238 WREG32(MC_SEQ_SUP_CNTL, 0x00000004); 239 WREG32(MC_SEQ_SUP_CNTL, 0x00000001); 240 241 /* wait for training to complete */ 242 for (i = 0; i < rdev->usec_timeout; i++) { 243 if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD) 244 break; 245 DRM_UDELAY(1); 246 } 247 248 if (running) 249 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); 250 } 251 252 return 0; 253 } 254 255 int ni_init_microcode(struct radeon_device *rdev) 256 { 257 const char *chip_name; 258 const char *rlc_chip_name; 259 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; 260 char fw_name[30]; 261 int err; 262 263 DRM_DEBUG("\n"); 264 265 switch (rdev->family) { 266 case CHIP_BARTS: 267 chip_name = "BARTS"; 268 rlc_chip_name = "BTC"; 269 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 270 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 271 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 272 mc_req_size = BTC_MC_UCODE_SIZE * 4; 273 break; 274 case CHIP_TURKS: 275 chip_name = "TURKS"; 276 rlc_chip_name = "BTC"; 277 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 278 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 279 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 280 mc_req_size = BTC_MC_UCODE_SIZE * 4; 281 break; 282 case CHIP_CAICOS: 283 chip_name = "CAICOS"; 284 rlc_chip_name = "BTC"; 285 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 286 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 287 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 288 mc_req_size = BTC_MC_UCODE_SIZE * 4; 289 break; 290 case CHIP_CAYMAN: 291 chip_name = "CAYMAN"; 292 rlc_chip_name = "CAYMAN"; 293 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4; 294 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 295 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; 296 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; 297 break; 298 case CHIP_ARUBA: 299 chip_name = "ARUBA"; 300 rlc_chip_name = "ARUBA"; 301 /* pfp/me same size as CAYMAN */ 302 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4; 303 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 304 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4; 305 mc_req_size = 0; 306 break; 307 default: panic("%s: Unsupported family %d", __func__, rdev->family); 308 } 309 310 DRM_INFO("Loading %s Microcode\n", chip_name); 311 err = 0; 312 313 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name); 314 rdev->pfp_fw = firmware_get(fw_name); 315 if (rdev->pfp_fw == NULL) { 316 err = -ENOENT; 317 goto out; 318 } 319 if (rdev->pfp_fw->datasize != pfp_req_size) { 320 DRM_ERROR( 321 "ni_cp: Bogus length %zu in firmware \"%s\"\n", 322 rdev->pfp_fw->datasize, fw_name); 323 err = -EINVAL; 324 goto out; 325 } 326 327 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name); 328 rdev->me_fw = firmware_get(fw_name); 329 if (rdev->me_fw == NULL) { 330 err = -ENOENT; 331 goto out; 332 } 333 if (rdev->me_fw->datasize != me_req_size) { 334 DRM_ERROR( 335 "ni_cp: Bogus length %zu in firmware \"%s\"\n", 336 rdev->me_fw->datasize, fw_name); 337 err = -EINVAL; 338 } 339 340 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", 341 rlc_chip_name); 342 rdev->rlc_fw = firmware_get(fw_name); 343 if (rdev->rlc_fw == NULL) { 344 err = -ENOENT; 345 goto out; 346 } 347 if (rdev->rlc_fw->datasize != rlc_req_size) { 348 DRM_ERROR( 349 "ni_rlc: Bogus length %zu in firmware \"%s\"\n", 350 rdev->rlc_fw->datasize, fw_name); 351 err = -EINVAL; 352 } 353 354 /* no MC ucode on TN */ 355 if (!(rdev->flags & RADEON_IS_IGP)) { 356 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", 357 chip_name); 358 rdev->mc_fw = firmware_get(fw_name); 359 if (rdev->mc_fw == NULL) { 360 err = -ENOENT; 361 goto out; 362 } 363 if (rdev->mc_fw->datasize != mc_req_size) { 364 DRM_ERROR( 365 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 366 rdev->mc_fw->datasize, fw_name); 367 err = -EINVAL; 368 } 369 } 370 out: 371 if (err) { 372 if (err != -EINVAL) 373 DRM_ERROR( 374 "ni_cp: Failed to load firmware \"%s\"\n", 375 fw_name); 376 if (rdev->pfp_fw != NULL) { 377 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 378 rdev->pfp_fw = NULL; 379 } 380 if (rdev->me_fw != NULL) { 381 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 382 rdev->me_fw = NULL; 383 } 384 if (rdev->rlc_fw != NULL) { 385 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 386 rdev->rlc_fw = NULL; 387 } 388 if (rdev->mc_fw != NULL) { 389 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 390 rdev->mc_fw = NULL; 391 } 392 } 393 return err; 394 } 395 396 /** 397 * ni_fini_microcode - drop the firmwares image references 398 * 399 * @rdev: radeon_device pointer 400 * 401 * Drop the pfp, me, mc and rlc firmwares image references. 402 * Called at driver shutdown. 403 */ 404 void ni_fini_microcode(struct radeon_device *rdev) 405 { 406 407 if (rdev->pfp_fw != NULL) { 408 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 409 rdev->pfp_fw = NULL; 410 } 411 412 if (rdev->me_fw != NULL) { 413 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 414 rdev->me_fw = NULL; 415 } 416 417 if (rdev->rlc_fw != NULL) { 418 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 419 rdev->rlc_fw = NULL; 420 } 421 422 if (rdev->mc_fw != NULL) { 423 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 424 rdev->mc_fw = NULL; 425 } 426 } 427 428 429 /* 430 * Core functions 431 */ 432 static void cayman_gpu_init(struct radeon_device *rdev) 433 { 434 u32 gb_addr_config = 0; 435 u32 mc_shared_chmap, mc_arb_ramcfg; 436 u32 cgts_tcc_disable; 437 u32 sx_debug_1; 438 u32 smx_dc_ctl0; 439 u32 cgts_sm_ctrl_reg; 440 u32 hdp_host_path_cntl; 441 u32 tmp; 442 u32 disabled_rb_mask; 443 int i, j; 444 445 switch (rdev->family) { 446 case CHIP_CAYMAN: 447 rdev->config.cayman.max_shader_engines = 2; 448 rdev->config.cayman.max_pipes_per_simd = 4; 449 rdev->config.cayman.max_tile_pipes = 8; 450 rdev->config.cayman.max_simds_per_se = 12; 451 rdev->config.cayman.max_backends_per_se = 4; 452 rdev->config.cayman.max_texture_channel_caches = 8; 453 rdev->config.cayman.max_gprs = 256; 454 rdev->config.cayman.max_threads = 256; 455 rdev->config.cayman.max_gs_threads = 32; 456 rdev->config.cayman.max_stack_entries = 512; 457 rdev->config.cayman.sx_num_of_sets = 8; 458 rdev->config.cayman.sx_max_export_size = 256; 459 rdev->config.cayman.sx_max_export_pos_size = 64; 460 rdev->config.cayman.sx_max_export_smx_size = 192; 461 rdev->config.cayman.max_hw_contexts = 8; 462 rdev->config.cayman.sq_num_cf_insts = 2; 463 464 rdev->config.cayman.sc_prim_fifo_size = 0x100; 465 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 466 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 467 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN; 468 break; 469 case CHIP_ARUBA: 470 default: 471 rdev->config.cayman.max_shader_engines = 1; 472 rdev->config.cayman.max_pipes_per_simd = 4; 473 rdev->config.cayman.max_tile_pipes = 2; 474 if ((rdev->ddev->pci_device == 0x9900) || 475 (rdev->ddev->pci_device == 0x9901) || 476 (rdev->ddev->pci_device == 0x9905) || 477 (rdev->ddev->pci_device == 0x9906) || 478 (rdev->ddev->pci_device == 0x9907) || 479 (rdev->ddev->pci_device == 0x9908) || 480 (rdev->ddev->pci_device == 0x9909) || 481 (rdev->ddev->pci_device == 0x9910) || 482 (rdev->ddev->pci_device == 0x9917)) { 483 rdev->config.cayman.max_simds_per_se = 6; 484 rdev->config.cayman.max_backends_per_se = 2; 485 } else if ((rdev->ddev->pci_device == 0x9903) || 486 (rdev->ddev->pci_device == 0x9904) || 487 (rdev->ddev->pci_device == 0x990A) || 488 (rdev->ddev->pci_device == 0x9913) || 489 (rdev->ddev->pci_device == 0x9918)) { 490 rdev->config.cayman.max_simds_per_se = 4; 491 rdev->config.cayman.max_backends_per_se = 2; 492 } else if ((rdev->ddev->pci_device == 0x9919) || 493 (rdev->ddev->pci_device == 0x9990) || 494 (rdev->ddev->pci_device == 0x9991) || 495 (rdev->ddev->pci_device == 0x9994) || 496 (rdev->ddev->pci_device == 0x99A0)) { 497 rdev->config.cayman.max_simds_per_se = 3; 498 rdev->config.cayman.max_backends_per_se = 1; 499 } else { 500 rdev->config.cayman.max_simds_per_se = 2; 501 rdev->config.cayman.max_backends_per_se = 1; 502 } 503 rdev->config.cayman.max_texture_channel_caches = 2; 504 rdev->config.cayman.max_gprs = 256; 505 rdev->config.cayman.max_threads = 256; 506 rdev->config.cayman.max_gs_threads = 32; 507 rdev->config.cayman.max_stack_entries = 512; 508 rdev->config.cayman.sx_num_of_sets = 8; 509 rdev->config.cayman.sx_max_export_size = 256; 510 rdev->config.cayman.sx_max_export_pos_size = 64; 511 rdev->config.cayman.sx_max_export_smx_size = 192; 512 rdev->config.cayman.max_hw_contexts = 8; 513 rdev->config.cayman.sq_num_cf_insts = 2; 514 515 rdev->config.cayman.sc_prim_fifo_size = 0x40; 516 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 517 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 518 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN; 519 break; 520 } 521 522 /* Initialize HDP */ 523 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 524 WREG32((0x2c14 + j), 0x00000000); 525 WREG32((0x2c18 + j), 0x00000000); 526 WREG32((0x2c1c + j), 0x00000000); 527 WREG32((0x2c20 + j), 0x00000000); 528 WREG32((0x2c24 + j), 0x00000000); 529 } 530 531 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 532 533 evergreen_fix_pci_max_read_req_size(rdev); 534 535 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 536 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 537 538 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 539 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 540 if (rdev->config.cayman.mem_row_size_in_kb > 4) 541 rdev->config.cayman.mem_row_size_in_kb = 4; 542 /* XXX use MC settings? */ 543 rdev->config.cayman.shader_engine_tile_size = 32; 544 rdev->config.cayman.num_gpus = 1; 545 rdev->config.cayman.multi_gpu_tile_size = 64; 546 547 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; 548 rdev->config.cayman.num_tile_pipes = (1 << tmp); 549 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 550 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256; 551 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; 552 rdev->config.cayman.num_shader_engines = tmp + 1; 553 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; 554 rdev->config.cayman.num_gpus = tmp + 1; 555 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; 556 rdev->config.cayman.multi_gpu_tile_size = 1 << tmp; 557 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; 558 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; 559 560 561 /* setup tiling info dword. gb_addr_config is not adequate since it does 562 * not have bank info, so create a custom tiling dword. 563 * bits 3:0 num_pipes 564 * bits 7:4 num_banks 565 * bits 11:8 group_size 566 * bits 15:12 row_size 567 */ 568 rdev->config.cayman.tile_config = 0; 569 switch (rdev->config.cayman.num_tile_pipes) { 570 case 1: 571 default: 572 rdev->config.cayman.tile_config |= (0 << 0); 573 break; 574 case 2: 575 rdev->config.cayman.tile_config |= (1 << 0); 576 break; 577 case 4: 578 rdev->config.cayman.tile_config |= (2 << 0); 579 break; 580 case 8: 581 rdev->config.cayman.tile_config |= (3 << 0); 582 break; 583 } 584 585 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 586 if (rdev->flags & RADEON_IS_IGP) 587 rdev->config.cayman.tile_config |= 1 << 4; 588 else { 589 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { 590 case 0: /* four banks */ 591 rdev->config.cayman.tile_config |= 0 << 4; 592 break; 593 case 1: /* eight banks */ 594 rdev->config.cayman.tile_config |= 1 << 4; 595 break; 596 case 2: /* sixteen banks */ 597 default: 598 rdev->config.cayman.tile_config |= 2 << 4; 599 break; 600 } 601 } 602 rdev->config.cayman.tile_config |= 603 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 604 rdev->config.cayman.tile_config |= 605 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 606 607 tmp = 0; 608 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) { 609 u32 rb_disable_bitmap; 610 611 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 612 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 613 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; 614 tmp <<= 4; 615 tmp |= rb_disable_bitmap; 616 } 617 /* enabled rb are just the one not disabled :) */ 618 disabled_rb_mask = tmp; 619 620 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 621 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 622 623 WREG32(GB_ADDR_CONFIG, gb_addr_config); 624 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 625 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 626 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 627 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 628 629 tmp = gb_addr_config & NUM_PIPES_MASK; 630 tmp = r6xx_remap_render_backend(rdev, tmp, 631 rdev->config.cayman.max_backends_per_se * 632 rdev->config.cayman.max_shader_engines, 633 CAYMAN_MAX_BACKENDS, disabled_rb_mask); 634 WREG32(GB_BACKEND_MAP, tmp); 635 636 cgts_tcc_disable = 0xffff0000; 637 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) 638 cgts_tcc_disable &= ~(1 << (16 + i)); 639 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 640 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); 641 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); 642 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 643 644 /* reprogram the shader complex */ 645 cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG); 646 for (i = 0; i < 16; i++) 647 WREG32(CGTS_SM_CTRL_REG, OVERRIDE); 648 WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg); 649 650 /* set HW defaults for 3D engine */ 651 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 652 653 sx_debug_1 = RREG32(SX_DEBUG_1); 654 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 655 WREG32(SX_DEBUG_1, sx_debug_1); 656 657 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 658 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 659 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); 660 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 661 662 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); 663 664 /* need to be explicitly zero-ed */ 665 WREG32(VGT_OFFCHIP_LDS_BASE, 0); 666 WREG32(SQ_LSTMP_RING_BASE, 0); 667 WREG32(SQ_HSTMP_RING_BASE, 0); 668 WREG32(SQ_ESTMP_RING_BASE, 0); 669 WREG32(SQ_GSTMP_RING_BASE, 0); 670 WREG32(SQ_VSTMP_RING_BASE, 0); 671 WREG32(SQ_PSTMP_RING_BASE, 0); 672 673 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); 674 675 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | 676 POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | 677 SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); 678 679 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | 680 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | 681 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); 682 683 684 WREG32(VGT_NUM_INSTANCES, 1); 685 686 WREG32(CP_PERFMON_CNTL, 0); 687 688 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | 689 FETCH_FIFO_HIWATER(0x4) | 690 DONE_FIFO_HIWATER(0xe0) | 691 ALU_UPDATE_FIFO_HIWATER(0x8))); 692 693 WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4)); 694 WREG32(SQ_CONFIG, (VC_ENABLE | 695 EXPORT_SRC_C | 696 GFX_PRIO(0) | 697 CS1_PRIO(0) | 698 CS2_PRIO(1))); 699 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE); 700 701 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 702 FORCE_EOV_MAX_REZ_CNT(255))); 703 704 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | 705 AUTO_INVLD_EN(ES_AND_GS_AUTO)); 706 707 WREG32(VGT_GS_VERTEX_REUSE, 16); 708 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 709 710 WREG32(CB_PERF_CTR0_SEL_0, 0); 711 WREG32(CB_PERF_CTR0_SEL_1, 0); 712 WREG32(CB_PERF_CTR1_SEL_0, 0); 713 WREG32(CB_PERF_CTR1_SEL_1, 0); 714 WREG32(CB_PERF_CTR2_SEL_0, 0); 715 WREG32(CB_PERF_CTR2_SEL_1, 0); 716 WREG32(CB_PERF_CTR3_SEL_0, 0); 717 WREG32(CB_PERF_CTR3_SEL_1, 0); 718 719 tmp = RREG32(HDP_MISC_CNTL); 720 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 721 WREG32(HDP_MISC_CNTL, tmp); 722 723 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 724 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 725 726 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 727 728 DRM_UDELAY(50); 729 } 730 731 /* 732 * GART 733 */ 734 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev) 735 { 736 /* flush hdp cache */ 737 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 738 739 /* bits 0-7 are the VM contexts0-7 */ 740 WREG32(VM_INVALIDATE_REQUEST, 1); 741 } 742 743 static int cayman_pcie_gart_enable(struct radeon_device *rdev) 744 { 745 int i, r; 746 747 if (rdev->gart.robj == NULL) { 748 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 749 return -EINVAL; 750 } 751 r = radeon_gart_table_vram_pin(rdev); 752 if (r) 753 return r; 754 radeon_gart_restore(rdev); 755 /* Setup TLB control */ 756 WREG32(MC_VM_MX_L1_TLB_CNTL, 757 (0xA << 7) | 758 ENABLE_L1_TLB | 759 ENABLE_L1_FRAGMENT_PROCESSING | 760 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 761 ENABLE_ADVANCED_DRIVER_MODEL | 762 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 763 /* Setup L2 cache */ 764 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 765 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 766 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 767 EFFECTIVE_L2_QUEUE_SIZE(7) | 768 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 769 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 770 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 771 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 772 /* setup context0 */ 773 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 774 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 775 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 776 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 777 (u32)(rdev->dummy_page.addr >> 12)); 778 WREG32(VM_CONTEXT0_CNTL2, 0); 779 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 780 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 781 782 WREG32(0x15D4, 0); 783 WREG32(0x15D8, 0); 784 WREG32(0x15DC, 0); 785 786 /* empty context1-7 */ 787 /* Assign the pt base to something valid for now; the pts used for 788 * the VMs are determined by the application and setup and assigned 789 * on the fly in the vm part of radeon_gart.c 790 */ 791 for (i = 1; i < 8; i++) { 792 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); 793 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); 794 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 795 rdev->gart.table_addr >> 12); 796 } 797 798 /* enable context1-7 */ 799 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 800 (u32)(rdev->dummy_page.addr >> 12)); 801 WREG32(VM_CONTEXT1_CNTL2, 4); 802 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 803 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 804 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 805 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 806 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | 807 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | 808 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | 809 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | 810 VALID_PROTECTION_FAULT_ENABLE_DEFAULT | 811 READ_PROTECTION_FAULT_ENABLE_INTERRUPT | 812 READ_PROTECTION_FAULT_ENABLE_DEFAULT | 813 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | 814 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); 815 816 cayman_pcie_gart_tlb_flush(rdev); 817 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 818 (unsigned)(rdev->mc.gtt_size >> 20), 819 (unsigned long long)rdev->gart.table_addr); 820 rdev->gart.ready = true; 821 return 0; 822 } 823 824 static void cayman_pcie_gart_disable(struct radeon_device *rdev) 825 { 826 /* Disable all tables */ 827 WREG32(VM_CONTEXT0_CNTL, 0); 828 WREG32(VM_CONTEXT1_CNTL, 0); 829 /* Setup TLB control */ 830 WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING | 831 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 832 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 833 /* Setup L2 cache */ 834 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 835 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 836 EFFECTIVE_L2_QUEUE_SIZE(7) | 837 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 838 WREG32(VM_L2_CNTL2, 0); 839 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 840 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 841 radeon_gart_table_vram_unpin(rdev); 842 } 843 844 static void cayman_pcie_gart_fini(struct radeon_device *rdev) 845 { 846 cayman_pcie_gart_disable(rdev); 847 radeon_gart_table_vram_free(rdev); 848 radeon_gart_fini(rdev); 849 } 850 851 void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 852 int ring, u32 cp_int_cntl) 853 { 854 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; 855 856 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); 857 WREG32(CP_INT_CNTL, cp_int_cntl); 858 } 859 860 /* 861 * CP. 862 */ 863 void cayman_fence_ring_emit(struct radeon_device *rdev, 864 struct radeon_fence *fence) 865 { 866 struct radeon_ring *ring = &rdev->ring[fence->ring]; 867 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 868 869 /* flush read cache over gart for this vmid */ 870 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 871 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 872 radeon_ring_write(ring, 0); 873 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 874 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); 875 radeon_ring_write(ring, 0xFFFFFFFF); 876 radeon_ring_write(ring, 0); 877 radeon_ring_write(ring, 10); /* poll interval */ 878 /* EVENT_WRITE_EOP - flush caches, send int */ 879 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 880 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 881 radeon_ring_write(ring, addr & 0xffffffff); 882 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 883 radeon_ring_write(ring, fence->seq); 884 radeon_ring_write(ring, 0); 885 } 886 887 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 888 { 889 struct radeon_ring *ring = &rdev->ring[ib->ring]; 890 891 /* set to DX10/11 mode */ 892 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); 893 radeon_ring_write(ring, 1); 894 895 if (ring->rptr_save_reg) { 896 uint32_t next_rptr = ring->wptr + 3 + 4 + 8; 897 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 898 radeon_ring_write(ring, ((ring->rptr_save_reg - 899 PACKET3_SET_CONFIG_REG_START) >> 2)); 900 radeon_ring_write(ring, next_rptr); 901 } 902 903 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 904 radeon_ring_write(ring, 905 #ifdef __BIG_ENDIAN 906 (2 << 0) | 907 #endif 908 (ib->gpu_addr & 0xFFFFFFFC)); 909 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 910 radeon_ring_write(ring, ib->length_dw | 911 (ib->vm ? (ib->vm->id << 24) : 0)); 912 913 /* flush read cache over gart for this vmid */ 914 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 915 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 916 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); 917 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 918 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); 919 radeon_ring_write(ring, 0xFFFFFFFF); 920 radeon_ring_write(ring, 0); 921 radeon_ring_write(ring, 10); /* poll interval */ 922 } 923 924 static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 925 { 926 if (enable) 927 WREG32(CP_ME_CNTL, 0); 928 else { 929 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 930 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 931 WREG32(SCRATCH_UMSK, 0); 932 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 933 } 934 } 935 936 static int cayman_cp_load_microcode(struct radeon_device *rdev) 937 { 938 const __be32 *fw_data; 939 int i; 940 941 if (!rdev->me_fw || !rdev->pfp_fw) 942 return -EINVAL; 943 944 cayman_cp_enable(rdev, false); 945 946 fw_data = (const __be32 *)rdev->pfp_fw->data; 947 WREG32(CP_PFP_UCODE_ADDR, 0); 948 for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++) 949 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 950 WREG32(CP_PFP_UCODE_ADDR, 0); 951 952 fw_data = (const __be32 *)rdev->me_fw->data; 953 WREG32(CP_ME_RAM_WADDR, 0); 954 for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++) 955 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 956 957 WREG32(CP_PFP_UCODE_ADDR, 0); 958 WREG32(CP_ME_RAM_WADDR, 0); 959 WREG32(CP_ME_RAM_RADDR, 0); 960 return 0; 961 } 962 963 static int cayman_cp_start(struct radeon_device *rdev) 964 { 965 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 966 int r, i; 967 968 r = radeon_ring_lock(rdev, ring, 7); 969 if (r) { 970 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 971 return r; 972 } 973 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 974 radeon_ring_write(ring, 0x1); 975 radeon_ring_write(ring, 0x0); 976 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1); 977 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 978 radeon_ring_write(ring, 0); 979 radeon_ring_write(ring, 0); 980 radeon_ring_unlock_commit(rdev, ring); 981 982 cayman_cp_enable(rdev, true); 983 984 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19); 985 if (r) { 986 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 987 return r; 988 } 989 990 /* setup clear context state */ 991 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 992 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 993 994 for (i = 0; i < cayman_default_size; i++) 995 radeon_ring_write(ring, cayman_default_state[i]); 996 997 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 998 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 999 1000 /* set clear context state */ 1001 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 1002 radeon_ring_write(ring, 0); 1003 1004 /* SQ_VTX_BASE_VTX_LOC */ 1005 radeon_ring_write(ring, 0xc0026f00); 1006 radeon_ring_write(ring, 0x00000000); 1007 radeon_ring_write(ring, 0x00000000); 1008 radeon_ring_write(ring, 0x00000000); 1009 1010 /* Clear consts */ 1011 radeon_ring_write(ring, 0xc0036f00); 1012 radeon_ring_write(ring, 0x00000bc4); 1013 radeon_ring_write(ring, 0xffffffff); 1014 radeon_ring_write(ring, 0xffffffff); 1015 radeon_ring_write(ring, 0xffffffff); 1016 1017 radeon_ring_write(ring, 0xc0026900); 1018 radeon_ring_write(ring, 0x00000316); 1019 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1020 radeon_ring_write(ring, 0x00000010); /* */ 1021 1022 radeon_ring_unlock_commit(rdev, ring); 1023 1024 /* XXX init other rings */ 1025 1026 return 0; 1027 } 1028 1029 static void cayman_cp_fini(struct radeon_device *rdev) 1030 { 1031 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1032 cayman_cp_enable(rdev, false); 1033 radeon_ring_fini(rdev, ring); 1034 radeon_scratch_free(rdev, ring->rptr_save_reg); 1035 } 1036 1037 static int cayman_cp_resume(struct radeon_device *rdev) 1038 { 1039 static const int ridx[] = { 1040 RADEON_RING_TYPE_GFX_INDEX, 1041 CAYMAN_RING_TYPE_CP1_INDEX, 1042 CAYMAN_RING_TYPE_CP2_INDEX 1043 }; 1044 static const unsigned cp_rb_cntl[] = { 1045 CP_RB0_CNTL, 1046 CP_RB1_CNTL, 1047 CP_RB2_CNTL, 1048 }; 1049 static const unsigned cp_rb_rptr_addr[] = { 1050 CP_RB0_RPTR_ADDR, 1051 CP_RB1_RPTR_ADDR, 1052 CP_RB2_RPTR_ADDR 1053 }; 1054 static const unsigned cp_rb_rptr_addr_hi[] = { 1055 CP_RB0_RPTR_ADDR_HI, 1056 CP_RB1_RPTR_ADDR_HI, 1057 CP_RB2_RPTR_ADDR_HI 1058 }; 1059 static const unsigned cp_rb_base[] = { 1060 CP_RB0_BASE, 1061 CP_RB1_BASE, 1062 CP_RB2_BASE 1063 }; 1064 struct radeon_ring *ring; 1065 int i, r; 1066 1067 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ 1068 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | 1069 SOFT_RESET_PA | 1070 SOFT_RESET_SH | 1071 SOFT_RESET_VGT | 1072 SOFT_RESET_SPI | 1073 SOFT_RESET_SX)); 1074 RREG32(GRBM_SOFT_RESET); 1075 DRM_MDELAY(15); 1076 WREG32(GRBM_SOFT_RESET, 0); 1077 RREG32(GRBM_SOFT_RESET); 1078 1079 WREG32(CP_SEM_WAIT_TIMER, 0x0); 1080 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 1081 1082 /* Set the write pointer delay */ 1083 WREG32(CP_RB_WPTR_DELAY, 0); 1084 1085 WREG32(CP_DEBUG, (1 << 27)); 1086 1087 /* set the wb address whether it's enabled or not */ 1088 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1089 WREG32(SCRATCH_UMSK, 0xff); 1090 1091 for (i = 0; i < 3; ++i) { 1092 uint32_t rb_cntl; 1093 uint64_t addr; 1094 1095 /* Set ring buffer size */ 1096 ring = &rdev->ring[ridx[i]]; 1097 rb_cntl = drm_order(ring->ring_size / 8); 1098 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; 1099 #ifdef __BIG_ENDIAN 1100 rb_cntl |= BUF_SWAP_32BIT; 1101 #endif 1102 WREG32(cp_rb_cntl[i], rb_cntl); 1103 1104 /* set the wb address whether it's enabled or not */ 1105 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET; 1106 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC); 1107 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF); 1108 } 1109 1110 /* set the rb base addr, this causes an internal reset of ALL rings */ 1111 for (i = 0; i < 3; ++i) { 1112 ring = &rdev->ring[ridx[i]]; 1113 WREG32(cp_rb_base[i], ring->gpu_addr >> 8); 1114 } 1115 1116 for (i = 0; i < 3; ++i) { 1117 /* Initialize the ring buffer's read and write pointers */ 1118 ring = &rdev->ring[ridx[i]]; 1119 WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); 1120 1121 ring->rptr = ring->wptr = 0; 1122 WREG32(ring->rptr_reg, ring->rptr); 1123 WREG32(ring->wptr_reg, ring->wptr); 1124 1125 DRM_MDELAY(1); 1126 WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); 1127 } 1128 1129 /* start the rings */ 1130 cayman_cp_start(rdev); 1131 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; 1132 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 1133 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 1134 /* this only test cp0 */ 1135 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1136 if (r) { 1137 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1138 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 1139 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 1140 return r; 1141 } 1142 1143 return 0; 1144 } 1145 1146 /* 1147 * DMA 1148 * Starting with R600, the GPU has an asynchronous 1149 * DMA engine. The programming model is very similar 1150 * to the 3D engine (ring buffer, IBs, etc.), but the 1151 * DMA controller has it's own packet format that is 1152 * different form the PM4 format used by the 3D engine. 1153 * It supports copying data, writing embedded data, 1154 * solid fills, and a number of other things. It also 1155 * has support for tiling/detiling of buffers. 1156 * Cayman and newer support two asynchronous DMA engines. 1157 */ 1158 /** 1159 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine 1160 * 1161 * @rdev: radeon_device pointer 1162 * @ib: IB object to schedule 1163 * 1164 * Schedule an IB in the DMA ring (cayman-SI). 1165 */ 1166 void cayman_dma_ring_ib_execute(struct radeon_device *rdev, 1167 struct radeon_ib *ib) 1168 { 1169 struct radeon_ring *ring = &rdev->ring[ib->ring]; 1170 1171 if (rdev->wb.enabled) { 1172 u32 next_rptr = ring->wptr + 4; 1173 while ((next_rptr & 7) != 5) 1174 next_rptr++; 1175 next_rptr += 3; 1176 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 1177 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 1178 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); 1179 radeon_ring_write(ring, next_rptr); 1180 } 1181 1182 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. 1183 * Pad as necessary with NOPs. 1184 */ 1185 while ((ring->wptr & 7) != 5) 1186 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1187 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); 1188 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 1189 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 1190 1191 } 1192 1193 /** 1194 * cayman_dma_stop - stop the async dma engines 1195 * 1196 * @rdev: radeon_device pointer 1197 * 1198 * Stop the async dma engines (cayman-SI). 1199 */ 1200 void cayman_dma_stop(struct radeon_device *rdev) 1201 { 1202 u32 rb_cntl; 1203 1204 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1205 1206 /* dma0 */ 1207 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); 1208 rb_cntl &= ~DMA_RB_ENABLE; 1209 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); 1210 1211 /* dma1 */ 1212 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); 1213 rb_cntl &= ~DMA_RB_ENABLE; 1214 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); 1215 1216 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; 1217 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; 1218 } 1219 1220 /** 1221 * cayman_dma_resume - setup and start the async dma engines 1222 * 1223 * @rdev: radeon_device pointer 1224 * 1225 * Set up the DMA ring buffers and enable them. (cayman-SI). 1226 * Returns 0 for success, error for failure. 1227 */ 1228 int cayman_dma_resume(struct radeon_device *rdev) 1229 { 1230 struct radeon_ring *ring; 1231 u32 rb_cntl, dma_cntl, ib_cntl; 1232 u32 rb_bufsz; 1233 u32 reg_offset, wb_offset; 1234 int i, r; 1235 1236 /* Reset dma */ 1237 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); 1238 RREG32(SRBM_SOFT_RESET); 1239 DRM_UDELAY(50); 1240 WREG32(SRBM_SOFT_RESET, 0); 1241 1242 for (i = 0; i < 2; i++) { 1243 if (i == 0) { 1244 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1245 reg_offset = DMA0_REGISTER_OFFSET; 1246 wb_offset = R600_WB_DMA_RPTR_OFFSET; 1247 } else { 1248 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1249 reg_offset = DMA1_REGISTER_OFFSET; 1250 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; 1251 } 1252 1253 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); 1254 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); 1255 1256 /* Set ring buffer size in dwords */ 1257 rb_bufsz = drm_order(ring->ring_size / 4); 1258 rb_cntl = rb_bufsz << 1; 1259 #ifdef __BIG_ENDIAN 1260 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 1261 #endif 1262 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); 1263 1264 /* Initialize the ring buffer's read and write pointers */ 1265 WREG32(DMA_RB_RPTR + reg_offset, 0); 1266 WREG32(DMA_RB_WPTR + reg_offset, 0); 1267 1268 /* set the wb address whether it's enabled or not */ 1269 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, 1270 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); 1271 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, 1272 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); 1273 1274 if (rdev->wb.enabled) 1275 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; 1276 1277 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); 1278 1279 /* enable DMA IBs */ 1280 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; 1281 #ifdef __BIG_ENDIAN 1282 ib_cntl |= DMA_IB_SWAP_ENABLE; 1283 #endif 1284 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); 1285 1286 dma_cntl = RREG32(DMA_CNTL + reg_offset); 1287 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 1288 WREG32(DMA_CNTL + reg_offset, dma_cntl); 1289 1290 ring->wptr = 0; 1291 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); 1292 1293 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; 1294 1295 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); 1296 1297 ring->ready = true; 1298 1299 r = radeon_ring_test(rdev, ring->idx, ring); 1300 if (r) { 1301 ring->ready = false; 1302 return r; 1303 } 1304 } 1305 1306 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1307 1308 return 0; 1309 } 1310 1311 /** 1312 * cayman_dma_fini - tear down the async dma engines 1313 * 1314 * @rdev: radeon_device pointer 1315 * 1316 * Stop the async dma engines and free the rings (cayman-SI). 1317 */ 1318 void cayman_dma_fini(struct radeon_device *rdev) 1319 { 1320 cayman_dma_stop(rdev); 1321 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); 1322 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); 1323 } 1324 1325 static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev) 1326 { 1327 u32 grbm_reset = 0; 1328 1329 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1330 return; 1331 1332 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", 1333 RREG32(GRBM_STATUS)); 1334 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", 1335 RREG32(GRBM_STATUS_SE0)); 1336 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", 1337 RREG32(GRBM_STATUS_SE1)); 1338 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", 1339 RREG32(SRBM_STATUS)); 1340 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1341 RREG32(CP_STALLED_STAT1)); 1342 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1343 RREG32(CP_STALLED_STAT2)); 1344 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1345 RREG32(CP_BUSY_STAT)); 1346 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1347 RREG32(CP_STAT)); 1348 1349 /* Disable CP parsing/prefetching */ 1350 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 1351 1352 /* reset all the gfx blocks */ 1353 grbm_reset = (SOFT_RESET_CP | 1354 SOFT_RESET_CB | 1355 SOFT_RESET_DB | 1356 SOFT_RESET_GDS | 1357 SOFT_RESET_PA | 1358 SOFT_RESET_SC | 1359 SOFT_RESET_SPI | 1360 SOFT_RESET_SH | 1361 SOFT_RESET_SX | 1362 SOFT_RESET_TC | 1363 SOFT_RESET_TA | 1364 SOFT_RESET_VGT | 1365 SOFT_RESET_IA); 1366 1367 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); 1368 WREG32(GRBM_SOFT_RESET, grbm_reset); 1369 (void)RREG32(GRBM_SOFT_RESET); 1370 DRM_UDELAY(50); 1371 WREG32(GRBM_SOFT_RESET, 0); 1372 (void)RREG32(GRBM_SOFT_RESET); 1373 1374 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", 1375 RREG32(GRBM_STATUS)); 1376 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", 1377 RREG32(GRBM_STATUS_SE0)); 1378 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", 1379 RREG32(GRBM_STATUS_SE1)); 1380 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", 1381 RREG32(SRBM_STATUS)); 1382 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1383 RREG32(CP_STALLED_STAT1)); 1384 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1385 RREG32(CP_STALLED_STAT2)); 1386 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1387 RREG32(CP_BUSY_STAT)); 1388 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1389 RREG32(CP_STAT)); 1390 1391 } 1392 1393 static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev) 1394 { 1395 u32 tmp; 1396 1397 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1398 return; 1399 1400 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1401 RREG32(DMA_STATUS_REG)); 1402 1403 /* dma0 */ 1404 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); 1405 tmp &= ~DMA_RB_ENABLE; 1406 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); 1407 1408 /* dma1 */ 1409 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); 1410 tmp &= ~DMA_RB_ENABLE; 1411 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); 1412 1413 /* Reset dma */ 1414 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); 1415 RREG32(SRBM_SOFT_RESET); 1416 DRM_UDELAY(50); 1417 WREG32(SRBM_SOFT_RESET, 0); 1418 1419 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1420 RREG32(DMA_STATUS_REG)); 1421 1422 } 1423 1424 static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1425 { 1426 struct evergreen_mc_save save; 1427 1428 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1429 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 1430 1431 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1432 reset_mask &= ~RADEON_RESET_DMA; 1433 1434 if (reset_mask == 0) 1435 return 0; 1436 1437 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1438 1439 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", 1440 RREG32(0x14F8)); 1441 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", 1442 RREG32(0x14D8)); 1443 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1444 RREG32(0x14FC)); 1445 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1446 RREG32(0x14DC)); 1447 1448 evergreen_mc_stop(rdev, &save); 1449 if (evergreen_mc_wait_for_idle(rdev)) { 1450 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1451 } 1452 1453 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) 1454 cayman_gpu_soft_reset_gfx(rdev); 1455 1456 if (reset_mask & RADEON_RESET_DMA) 1457 cayman_gpu_soft_reset_dma(rdev); 1458 1459 /* Wait a little for things to settle down */ 1460 DRM_UDELAY(50); 1461 1462 evergreen_mc_resume(rdev, &save); 1463 return 0; 1464 } 1465 1466 int cayman_asic_reset(struct radeon_device *rdev) 1467 { 1468 return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX | 1469 RADEON_RESET_COMPUTE | 1470 RADEON_RESET_DMA)); 1471 } 1472 1473 /** 1474 * cayman_dma_is_lockup - Check if the DMA engine is locked up 1475 * 1476 * @rdev: radeon_device pointer 1477 * @ring: radeon_ring structure holding ring information 1478 * 1479 * Check if the async DMA engine is locked up (cayman-SI). 1480 * Returns true if the engine appears to be locked up, false if not. 1481 */ 1482 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1483 { 1484 u32 dma_status_reg; 1485 1486 if (ring->idx == R600_RING_TYPE_DMA_INDEX) 1487 dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET); 1488 else 1489 dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET); 1490 if (dma_status_reg & DMA_IDLE) { 1491 radeon_ring_lockup_update(ring); 1492 return false; 1493 } 1494 /* force ring activities */ 1495 radeon_ring_force_activity(rdev, ring); 1496 return radeon_ring_test_lockup(rdev, ring); 1497 } 1498 1499 static int cayman_startup(struct radeon_device *rdev) 1500 { 1501 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1502 int r; 1503 1504 /* enable pcie gen2 link */ 1505 evergreen_pcie_gen2_enable(rdev); 1506 1507 if (rdev->flags & RADEON_IS_IGP) { 1508 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1509 r = ni_init_microcode(rdev); 1510 if (r) { 1511 DRM_ERROR("Failed to load firmware!\n"); 1512 return r; 1513 } 1514 } 1515 } else { 1516 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 1517 r = ni_init_microcode(rdev); 1518 if (r) { 1519 DRM_ERROR("Failed to load firmware!\n"); 1520 return r; 1521 } 1522 } 1523 1524 r = ni_mc_load_microcode(rdev); 1525 if (r) { 1526 DRM_ERROR("Failed to load MC firmware!\n"); 1527 return r; 1528 } 1529 } 1530 1531 r = r600_vram_scratch_init(rdev); 1532 if (r) 1533 return r; 1534 1535 evergreen_mc_program(rdev); 1536 r = cayman_pcie_gart_enable(rdev); 1537 if (r) 1538 return r; 1539 cayman_gpu_init(rdev); 1540 1541 r = evergreen_blit_init(rdev); 1542 if (r) { 1543 r600_blit_fini(rdev); 1544 rdev->asic->copy.copy = NULL; 1545 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1546 } 1547 1548 /* allocate rlc buffers */ 1549 if (rdev->flags & RADEON_IS_IGP) { 1550 r = si_rlc_init(rdev); 1551 if (r) { 1552 DRM_ERROR("Failed to init rlc BOs!\n"); 1553 return r; 1554 } 1555 } 1556 1557 /* allocate wb buffer */ 1558 r = radeon_wb_init(rdev); 1559 if (r) 1560 return r; 1561 1562 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 1563 if (r) { 1564 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1565 return r; 1566 } 1567 1568 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); 1569 if (r) { 1570 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1571 return r; 1572 } 1573 1574 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX); 1575 if (r) { 1576 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 1577 return r; 1578 } 1579 1580 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); 1581 if (r) { 1582 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 1583 return r; 1584 } 1585 1586 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); 1587 if (r) { 1588 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 1589 return r; 1590 } 1591 1592 /* Enable IRQ */ 1593 r = r600_irq_init(rdev); 1594 if (r) { 1595 DRM_ERROR("radeon: IH init failed (%d).\n", r); 1596 radeon_irq_kms_fini(rdev); 1597 return r; 1598 } 1599 evergreen_irq_set(rdev); 1600 1601 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 1602 CP_RB0_RPTR, CP_RB0_WPTR, 1603 0, 0xfffff, RADEON_CP_PACKET2); 1604 if (r) 1605 return r; 1606 1607 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1608 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 1609 DMA_RB_RPTR + DMA0_REGISTER_OFFSET, 1610 DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 1611 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1612 if (r) 1613 return r; 1614 1615 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1616 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, 1617 DMA_RB_RPTR + DMA1_REGISTER_OFFSET, 1618 DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 1619 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1620 if (r) 1621 return r; 1622 1623 r = cayman_cp_load_microcode(rdev); 1624 if (r) 1625 return r; 1626 r = cayman_cp_resume(rdev); 1627 if (r) 1628 return r; 1629 1630 r = cayman_dma_resume(rdev); 1631 if (r) 1632 return r; 1633 1634 r = radeon_ib_pool_init(rdev); 1635 if (r) { 1636 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1637 return r; 1638 } 1639 1640 r = radeon_vm_manager_init(rdev); 1641 if (r) { 1642 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); 1643 return r; 1644 } 1645 1646 r = r600_audio_init(rdev); 1647 if (r) 1648 return r; 1649 1650 return 0; 1651 } 1652 1653 int cayman_resume(struct radeon_device *rdev) 1654 { 1655 int r; 1656 1657 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 1658 * posting will perform necessary task to bring back GPU into good 1659 * shape. 1660 */ 1661 /* post card */ 1662 atom_asic_init(rdev->mode_info.atom_context); 1663 1664 rdev->accel_working = true; 1665 r = cayman_startup(rdev); 1666 if (r) { 1667 DRM_ERROR("cayman startup failed on resume\n"); 1668 rdev->accel_working = false; 1669 return r; 1670 } 1671 return r; 1672 } 1673 1674 int cayman_suspend(struct radeon_device *rdev) 1675 { 1676 r600_audio_fini(rdev); 1677 cayman_cp_enable(rdev, false); 1678 cayman_dma_stop(rdev); 1679 evergreen_irq_suspend(rdev); 1680 radeon_wb_disable(rdev); 1681 cayman_pcie_gart_disable(rdev); 1682 return 0; 1683 } 1684 1685 /* Plan is to move initialization in that function and use 1686 * helper function so that radeon_device_init pretty much 1687 * do nothing more than calling asic specific function. This 1688 * should also allow to remove a bunch of callback function 1689 * like vram_info. 1690 */ 1691 int cayman_init(struct radeon_device *rdev) 1692 { 1693 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1694 int r; 1695 1696 /* Read BIOS */ 1697 if (!radeon_get_bios(rdev)) { 1698 if (ASIC_IS_AVIVO(rdev)) 1699 return -EINVAL; 1700 } 1701 /* Must be an ATOMBIOS */ 1702 if (!rdev->is_atom_bios) { 1703 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n"); 1704 return -EINVAL; 1705 } 1706 r = radeon_atombios_init(rdev); 1707 if (r) 1708 return r; 1709 1710 /* Post card if necessary */ 1711 if (!radeon_card_posted(rdev)) { 1712 if (!rdev->bios) { 1713 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 1714 return -EINVAL; 1715 } 1716 DRM_INFO("GPU not posted. posting now...\n"); 1717 atom_asic_init(rdev->mode_info.atom_context); 1718 } 1719 /* Initialize scratch registers */ 1720 r600_scratch_init(rdev); 1721 /* Initialize surface registers */ 1722 radeon_surface_init(rdev); 1723 /* Initialize clocks */ 1724 radeon_get_clock_info(rdev->ddev); 1725 /* Fence driver */ 1726 r = radeon_fence_driver_init(rdev); 1727 if (r) 1728 return r; 1729 /* initialize memory controller */ 1730 r = evergreen_mc_init(rdev); 1731 if (r) 1732 return r; 1733 /* Memory manager */ 1734 r = radeon_bo_init(rdev); 1735 if (r) 1736 return r; 1737 1738 r = radeon_irq_kms_init(rdev); 1739 if (r) 1740 return r; 1741 1742 ring->ring_obj = NULL; 1743 r600_ring_init(rdev, ring, 1024 * 1024); 1744 1745 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1746 ring->ring_obj = NULL; 1747 r600_ring_init(rdev, ring, 64 * 1024); 1748 1749 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; 1750 ring->ring_obj = NULL; 1751 r600_ring_init(rdev, ring, 64 * 1024); 1752 1753 rdev->ih.ring_obj = NULL; 1754 r600_ih_ring_init(rdev, 64 * 1024); 1755 1756 r = r600_pcie_gart_init(rdev); 1757 if (r) 1758 return r; 1759 1760 rdev->accel_working = true; 1761 r = cayman_startup(rdev); 1762 if (r) { 1763 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1764 cayman_cp_fini(rdev); 1765 cayman_dma_fini(rdev); 1766 r600_irq_fini(rdev); 1767 if (rdev->flags & RADEON_IS_IGP) 1768 si_rlc_fini(rdev); 1769 radeon_wb_fini(rdev); 1770 radeon_ib_pool_fini(rdev); 1771 radeon_vm_manager_fini(rdev); 1772 radeon_irq_kms_fini(rdev); 1773 cayman_pcie_gart_fini(rdev); 1774 rdev->accel_working = false; 1775 } 1776 1777 /* Don't start up if the MC ucode is missing. 1778 * The default clocks and voltages before the MC ucode 1779 * is loaded are not suffient for advanced operations. 1780 * 1781 * We can skip this check for TN, because there is no MC 1782 * ucode. 1783 */ 1784 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { 1785 DRM_ERROR("radeon: MC ucode required for NI+.\n"); 1786 return -EINVAL; 1787 } 1788 1789 return 0; 1790 } 1791 1792 void cayman_fini(struct radeon_device *rdev) 1793 { 1794 r600_blit_fini(rdev); 1795 cayman_cp_fini(rdev); 1796 cayman_dma_fini(rdev); 1797 r600_irq_fini(rdev); 1798 if (rdev->flags & RADEON_IS_IGP) 1799 si_rlc_fini(rdev); 1800 radeon_wb_fini(rdev); 1801 radeon_vm_manager_fini(rdev); 1802 radeon_ib_pool_fini(rdev); 1803 radeon_irq_kms_fini(rdev); 1804 cayman_pcie_gart_fini(rdev); 1805 r600_vram_scratch_fini(rdev); 1806 radeon_gem_fini(rdev); 1807 radeon_fence_driver_fini(rdev); 1808 radeon_bo_fini(rdev); 1809 radeon_atombios_fini(rdev); 1810 ni_fini_microcode(rdev); 1811 drm_free(rdev->bios, DRM_MEM_DRIVER); 1812 rdev->bios = NULL; 1813 } 1814 1815 /* 1816 * vm 1817 */ 1818 int cayman_vm_init(struct radeon_device *rdev) 1819 { 1820 /* number of VMs */ 1821 rdev->vm_manager.nvm = 8; 1822 /* base offset of vram pages */ 1823 if (rdev->flags & RADEON_IS_IGP) { 1824 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET); 1825 tmp <<= 22; 1826 rdev->vm_manager.vram_base_offset = tmp; 1827 } else 1828 rdev->vm_manager.vram_base_offset = 0; 1829 return 0; 1830 } 1831 1832 void cayman_vm_fini(struct radeon_device *rdev) 1833 { 1834 } 1835 1836 #define R600_ENTRY_VALID (1 << 0) 1837 #define R600_PTE_SYSTEM (1 << 1) 1838 #define R600_PTE_SNOOPED (1 << 2) 1839 #define R600_PTE_READABLE (1 << 5) 1840 #define R600_PTE_WRITEABLE (1 << 6) 1841 1842 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) 1843 { 1844 uint32_t r600_flags = 0; 1845 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0; 1846 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; 1847 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; 1848 if (flags & RADEON_VM_PAGE_SYSTEM) { 1849 r600_flags |= R600_PTE_SYSTEM; 1850 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; 1851 } 1852 return r600_flags; 1853 } 1854 1855 /** 1856 * cayman_vm_set_page - update the page tables using the CP 1857 * 1858 * @rdev: radeon_device pointer 1859 * @pe: addr of the page entry 1860 * @addr: dst addr to write into pe 1861 * @count: number of page entries to update 1862 * @incr: increase next addr by incr bytes 1863 * @flags: access flags 1864 * 1865 * Update the page tables using the CP (cayman-si). 1866 */ 1867 void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, 1868 uint64_t addr, unsigned count, 1869 uint32_t incr, uint32_t flags) 1870 { 1871 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 1872 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1873 uint64_t value; 1874 unsigned ndw; 1875 1876 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { 1877 while (count) { 1878 ndw = 1 + count * 2; 1879 if (ndw > 0x3FFF) 1880 ndw = 0x3FFF; 1881 1882 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); 1883 radeon_ring_write(ring, pe); 1884 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1885 for (; ndw > 1; ndw -= 2, --count, pe += 8) { 1886 if (flags & RADEON_VM_PAGE_SYSTEM) { 1887 value = radeon_vm_map_gart(rdev, addr); 1888 value &= 0xFFFFFFFFFFFFF000ULL; 1889 } else if (flags & RADEON_VM_PAGE_VALID) { 1890 value = addr; 1891 } else { 1892 value = 0; 1893 } 1894 addr += incr; 1895 value |= r600_flags; 1896 radeon_ring_write(ring, value); 1897 radeon_ring_write(ring, upper_32_bits(value)); 1898 } 1899 } 1900 } else { 1901 while (count) { 1902 ndw = count * 2; 1903 if (ndw > 0xFFFFE) 1904 ndw = 0xFFFFE; 1905 1906 /* for non-physically contiguous pages (system) */ 1907 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw)); 1908 radeon_ring_write(ring, pe); 1909 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1910 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 1911 if (flags & RADEON_VM_PAGE_SYSTEM) { 1912 value = radeon_vm_map_gart(rdev, addr); 1913 value &= 0xFFFFFFFFFFFFF000ULL; 1914 } else if (flags & RADEON_VM_PAGE_VALID) { 1915 value = addr; 1916 } else { 1917 value = 0; 1918 } 1919 addr += incr; 1920 value |= r600_flags; 1921 radeon_ring_write(ring, value); 1922 radeon_ring_write(ring, upper_32_bits(value)); 1923 } 1924 } 1925 } 1926 } 1927 1928 /** 1929 * cayman_vm_flush - vm flush using the CP 1930 * 1931 * @rdev: radeon_device pointer 1932 * 1933 * Update the page table base and flush the VM TLB 1934 * using the CP (cayman-si). 1935 */ 1936 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 1937 { 1938 struct radeon_ring *ring = &rdev->ring[ridx]; 1939 1940 if (vm == NULL) 1941 return; 1942 1943 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); 1944 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 1945 1946 /* flush hdp cache */ 1947 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); 1948 radeon_ring_write(ring, 0x1); 1949 1950 /* bits 0-7 are the VM contexts0-7 */ 1951 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); 1952 radeon_ring_write(ring, 1 << vm->id); 1953 1954 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 1955 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 1956 radeon_ring_write(ring, 0x0); 1957 } 1958 1959 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 1960 { 1961 struct radeon_ring *ring = &rdev->ring[ridx]; 1962 1963 if (vm == NULL) 1964 return; 1965 1966 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 1967 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 1968 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 1969 1970 /* flush hdp cache */ 1971 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 1972 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); 1973 radeon_ring_write(ring, 1); 1974 1975 /* bits 0-7 are the VM contexts0-7 */ 1976 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 1977 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); 1978 radeon_ring_write(ring, 1 << vm->id); 1979 } 1980 1981