/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/si.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <uapi_drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"

#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863

extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);

#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)

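/*
 * The clock values here appear to follow the radeon convention of 10 kHz
 * units, which would make PCIE_BUS_CLK the 100 MHz PCIe reference clock
 * and TCLK a 10 MHz test clock (assumption; not spelled out in this file).
 */
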
/**
 * si_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (SI).
 */
u32 si_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 tmp;

	tmp = RREG32(CG_CLKPIN_CNTL_2);
	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;

	tmp = RREG32(CG_CLKPIN_CNTL);
	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;
}

/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = (actual_temp * 1000);

	return actual_temp;
}

#define TAHITI_IO_MC_REGS_SIZE 36

static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};

static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};

static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};

static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a17730}
};

/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_TAHITI:
		io_mc_regs = (u32 *)&tahiti_io_mc_regs;
		ucode_size = SI_MC_UCODE_SIZE;
		regs_size = TAHITI_IO_MC_REGS_SIZE;
		break;
	case CHIP_PITCAIRN:
		io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
		ucode_size = SI_MC_UCODE_SIZE;
		regs_size = TAHITI_IO_MC_REGS_SIZE;
		break;
	case CHIP_VERDE:
	default:
		io_mc_regs = (u32 *)&verde_io_mc_regs;
		ucode_size = SI_MC_UCODE_SIZE;
		regs_size = TAHITI_IO_MC_REGS_SIZE;
		break;
	case CHIP_OLAND:
		io_mc_regs = (u32 *)&oland_io_mc_regs;
		ucode_size = OLAND_MC_UCODE_SIZE;
		regs_size = TAHITI_IO_MC_REGS_SIZE;
		break;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		/* XXX: 'running' is zero in this branch, so the blackout
		 * save here (and the restore below) can never trigger;
		 * kept as in the original driver. */
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			DRM_UDELAY(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			DRM_UDELAY(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

static int si_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_TAHITI:
		chip_name = "TAHITI";
		rlc_chip_name = "TAHITI";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		break;
	case CHIP_PITCAIRN:
		chip_name = "PITCAIRN";
		rlc_chip_name = "PITCAIRN";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		break;
	case CHIP_VERDE:
		chip_name = "VERDE";
		rlc_chip_name = "VERDE";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		break;
	case CHIP_OLAND:
		chip_name = "OLAND";
		rlc_chip_name = "OLAND";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = OLAND_MC_UCODE_SIZE * 4;
		break;
	default:
		panic("%s: Unsupported family %d", __func__, rdev->family);
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);
	err = 0;

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
	rdev->pfp_fw = firmware_get(fw_name);
	if (rdev->pfp_fw == NULL) {
		err = -ENOENT;
		goto out;
	}
	if (rdev->pfp_fw->datasize != pfp_req_size) {
		DRM_ERROR("si_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->pfp_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
	rdev->me_fw = firmware_get(fw_name);
	if (rdev->me_fw == NULL) {
		err = -ENOENT;
		goto out;
	}
	if (rdev->me_fw->datasize != me_req_size) {
		DRM_ERROR("si_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_ce", chip_name);
	rdev->ce_fw = firmware_get(fw_name);
	if (rdev->ce_fw == NULL) {
		err = -ENOENT;
		goto out;
	}
	if (rdev->ce_fw->datasize != ce_req_size) {
		DRM_ERROR("si_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->ce_fw->datasize, fw_name);
		err = -EINVAL;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc",
		  rlc_chip_name);
	rdev->rlc_fw = firmware_get(fw_name);
	if (rdev->rlc_fw == NULL) {
		err = -ENOENT;
		goto out;
	}
	if (rdev->rlc_fw->datasize != rlc_req_size) {
		DRM_ERROR("si_rlc: Bogus length %zu in firmware \"%s\"\n",
			  rdev->rlc_fw->datasize, fw_name);
		err = -EINVAL;
	}

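	/*
	 * Note: unlike the pfp check above, the me/ce/rlc/mc size checks
	 * only set err = -EINVAL without jumping to 'out', so the remaining
	 * images are still fetched and the error path below then releases
	 * every reference that was taken.
	 */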
\"%s\"\n", 433 rdev->rlc_fw->datasize, fw_name); 434 err = -EINVAL; 435 } 436 437 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", chip_name); 438 rdev->mc_fw = firmware_get(fw_name); 439 if (rdev->mc_fw == NULL) { 440 err = -ENOENT; 441 goto out; 442 } 443 if (rdev->mc_fw->datasize != mc_req_size) { 444 DRM_ERROR( 445 "si_mc: Bogus length %zu in firmware \"%s\"\n", 446 rdev->mc_fw->datasize, fw_name); 447 err = -EINVAL; 448 } 449 450 out: 451 if (err) { 452 if (err != -EINVAL) 453 DRM_ERROR( 454 "si_cp: Failed to load firmware \"%s\"\n", 455 fw_name); 456 if (rdev->pfp_fw != NULL) { 457 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 458 rdev->pfp_fw = NULL; 459 } 460 if (rdev->me_fw != NULL) { 461 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 462 rdev->me_fw = NULL; 463 } 464 if (rdev->ce_fw != NULL) { 465 firmware_put(rdev->ce_fw, FIRMWARE_UNLOAD); 466 rdev->ce_fw = NULL; 467 } 468 if (rdev->rlc_fw != NULL) { 469 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 470 rdev->rlc_fw = NULL; 471 } 472 if (rdev->mc_fw != NULL) { 473 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 474 rdev->mc_fw = NULL; 475 } 476 } 477 return err; 478 } 479 480 /** 481 * si_fini_microcode - drop the firmwares image references 482 * 483 * @rdev: radeon_device pointer 484 * 485 * Drop the pfp, me, rlc, mc and ce firmware image references. 486 * Called at driver shutdown. 487 */ 488 static void si_fini_microcode(struct radeon_device *rdev) 489 { 490 491 if (rdev->pfp_fw != NULL) { 492 firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD); 493 rdev->pfp_fw = NULL; 494 } 495 496 if (rdev->me_fw != NULL) { 497 firmware_put(rdev->me_fw, FIRMWARE_UNLOAD); 498 rdev->me_fw = NULL; 499 } 500 501 if (rdev->rlc_fw != NULL) { 502 firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD); 503 rdev->rlc_fw = NULL; 504 } 505 506 if (rdev->mc_fw != NULL) { 507 firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD); 508 rdev->mc_fw = NULL; 509 } 510 511 if (rdev->ce_fw != NULL) { 512 firmware_put(rdev->ce_fw, FIRMWARE_UNLOAD); 513 rdev->ce_fw = NULL; 514 } 515 } 516 517 /* watermark setup */ 518 static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, 519 struct radeon_crtc *radeon_crtc, 520 struct drm_display_mode *mode, 521 struct drm_display_mode *other_mode) 522 { 523 u32 tmp; 524 /* 525 * Line Buffer Setup 526 * There are 3 line buffers, each one shared by 2 display controllers. 527 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between 528 * the display controllers. The paritioning is done via one of four 529 * preset allocations specified in bits 21:20: 530 * 0 - half lb 531 * 2 - whole lb, other crtc must be disabled 532 */ 533 /* this can get tricky if we have two large displays on a paired group 534 * of crtcs. Ideally for multiple large displays we'd assign them to 535 * non-linked crtcs for maximum line buffer allocation. 
static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
				   struct radeon_crtc *radeon_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

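/*
 * These bandwidth helpers work in the driver's 20.12 fixed-point format:
 * dfixed_const() loads an integer, dfixed_mul()/dfixed_div() keep 12
 * fractional bits, and dfixed_trunc() drops them. dce6_dram_bandwidth()
 * above therefore evaluates to roughly
 *   yclk(MHz) * (dram_channels * 4 bytes) * 0.7 efficiency.
 */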
static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}

static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, sclk, bandwidth;
	fixed20_12 a, b1, b2;
	u32 min_bandwidth;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
	b1.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
	b2.full = dfixed_mul(a, sclk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));

	a.full = dfixed_const(min_bandwidth);
	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce6_average_bandwidth(wm) <=
	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce6_average_bandwidth(wm) <=
	    (dce6_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce6_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void dce6_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		if (rdev->family == CHIP_ARUBA)
			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			wm.dram_channels = si_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !dce6_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

void dce6_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/*
 * Core functions
 */
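/*
 * The 32 GB_TILE_MODEn registers describe the tiling layouts that can be
 * selected by index; each entry packs the array mode, pipe config, tile
 * split, bank geometry and macro-tile aspect into a single dword.
 */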
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	if ((rdev->family == CHIP_TAHITI) ||
	    (rdev->family == CHIP_PITCAIRN)) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if ((rdev->family == CHIP_VERDE) ||
		   (rdev->family == CHIP_OLAND)) {
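		/* Verde/Oland have 4 tile pipes, so the non-PRT entries below
		 * use P4_8x16 pipe configs where Tahiti/Pitcairn used
		 * P8_32x32_8x16; the PRT modes (21-25) stay on P8. */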
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else
		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}

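/*
 * GRBM_GFX_INDEX steers subsequent per-SE/per-SH register accesses to a
 * particular shader engine / shader array; an index of 0xffffffff selects
 * broadcast writes to all instances.
 */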
static void si_select_se_sh(struct radeon_device *rdev,
			    u32 se_num, u32 sh_num)
{
	u32 data = INSTANCE_BROADCAST_WRITES;

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
	else if (se_num == 0xffffffff)
		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
	else if (sh_num == 0xffffffff)
		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
	else
		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
	WREG32(GRBM_GFX_INDEX, data);
}

/* build a contiguous low bitmask, e.g. si_create_bitmask(3) == 0x7 */
static u32 si_create_bitmask(u32 bit_width)
{
	u32 i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}

/* returns a bitmask of the compute units enabled in the selected array */
static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
{
	u32 data, mask;

	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	if (data & 1)
		data &= INACTIVE_CUS_MASK;
	else
		data = 0;
	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);

	data >>= INACTIVE_CUS_SHIFT;

	mask = si_create_bitmask(cu_per_sh);

	return ~data & mask;
}

static void si_setup_spi(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 cu_per_sh)
{
	int i, j, k;
	u32 data, mask, active_cu;

	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
			active_cu = si_get_cu_enabled(rdev, cu_per_sh);

			mask = 1;
			for (k = 0; k < 16; k++) {
				/* XXX: 'mask <<= k' advances the probe bit by
				 * k positions each pass (bits 0, 1, 3, 6, ...)
				 * rather than one bit at a time; kept as in
				 * the original driver. */
				mask <<= k;
				if (active_cu & mask) {
					data &= ~mask;
					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
					break;
				}
			}
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}

static u32 si_get_rb_disabled(struct radeon_device *rdev,
			      u32 max_rb_num, u32 se_num,
			      u32 sh_per_se)
{
	u32 data, mask;

	data = RREG32(CC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;

	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);

	return data & mask;
}

static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	for (i = 0; i < se_num; i++) {
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}

static void si_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_TAHITI:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 12;
		rdev->config.si.max_cu_per_sh = 8;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 12;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 8;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 8;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
	default:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 2;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 6;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 2;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
	rdev->config.si.mem_max_burst_length_bytes = 256;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.si.mem_row_size_in_kb > 4)
		rdev->config.si.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
*/ 1727 rdev->config.si.shader_engine_tile_size = 32; 1728 rdev->config.si.num_gpus = 1; 1729 rdev->config.si.multi_gpu_tile_size = 64; 1730 1731 /* fix up row size */ 1732 gb_addr_config &= ~ROW_SIZE_MASK; 1733 switch (rdev->config.si.mem_row_size_in_kb) { 1734 case 1: 1735 default: 1736 gb_addr_config |= ROW_SIZE(0); 1737 break; 1738 case 2: 1739 gb_addr_config |= ROW_SIZE(1); 1740 break; 1741 case 4: 1742 gb_addr_config |= ROW_SIZE(2); 1743 break; 1744 } 1745 1746 /* setup tiling info dword. gb_addr_config is not adequate since it does 1747 * not have bank info, so create a custom tiling dword. 1748 * bits 3:0 num_pipes 1749 * bits 7:4 num_banks 1750 * bits 11:8 group_size 1751 * bits 15:12 row_size 1752 */ 1753 rdev->config.si.tile_config = 0; 1754 switch (rdev->config.si.num_tile_pipes) { 1755 case 1: 1756 rdev->config.si.tile_config |= (0 << 0); 1757 break; 1758 case 2: 1759 rdev->config.si.tile_config |= (1 << 0); 1760 break; 1761 case 4: 1762 rdev->config.si.tile_config |= (2 << 0); 1763 break; 1764 case 8: 1765 default: 1766 /* XXX what about 12? */ 1767 rdev->config.si.tile_config |= (3 << 0); 1768 break; 1769 } 1770 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { 1771 case 0: /* four banks */ 1772 rdev->config.si.tile_config |= 0 << 4; 1773 break; 1774 case 1: /* eight banks */ 1775 rdev->config.si.tile_config |= 1 << 4; 1776 break; 1777 case 2: /* sixteen banks */ 1778 default: 1779 rdev->config.si.tile_config |= 2 << 4; 1780 break; 1781 } 1782 rdev->config.si.tile_config |= 1783 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1784 rdev->config.si.tile_config |= 1785 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 1786 1787 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1788 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1789 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1790 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 1791 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 1792 1793 si_tiling_mode_table_init(rdev); 1794 1795 si_setup_rb(rdev, rdev->config.si.max_shader_engines, 1796 rdev->config.si.max_sh_per_se, 1797 rdev->config.si.max_backends_per_se); 1798 1799 si_setup_spi(rdev, rdev->config.si.max_shader_engines, 1800 rdev->config.si.max_sh_per_se, 1801 rdev->config.si.max_cu_per_sh); 1802 1803 1804 /* set HW defaults for 3D engine */ 1805 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 1806 ROQ_IB2_START(0x2b))); 1807 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 1808 1809 sx_debug_1 = RREG32(SX_DEBUG_1); 1810 WREG32(SX_DEBUG_1, sx_debug_1); 1811 1812 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 1813 1814 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) | 1815 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) | 1816 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) | 1817 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size))); 1818 1819 WREG32(VGT_NUM_INSTANCES, 1); 1820 1821 WREG32(CP_PERFMON_CNTL, 0); 1822 1823 WREG32(SQ_CONFIG, 0); 1824 1825 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 1826 FORCE_EOV_MAX_REZ_CNT(255))); 1827 1828 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | 1829 AUTO_INVLD_EN(ES_AND_GS_AUTO)); 1830 1831 WREG32(VGT_GS_VERTEX_REUSE, 16); 1832 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 1833 1834 WREG32(CB_PERFCOUNTER0_SELECT0, 0); 1835 WREG32(CB_PERFCOUNTER0_SELECT1, 0); 1836 WREG32(CB_PERFCOUNTER1_SELECT0, 0); 1837 
WREG32(CB_PERFCOUNTER1_SELECT1, 0); 1838 WREG32(CB_PERFCOUNTER2_SELECT0, 0); 1839 WREG32(CB_PERFCOUNTER2_SELECT1, 0); 1840 WREG32(CB_PERFCOUNTER3_SELECT0, 0); 1841 WREG32(CB_PERFCOUNTER3_SELECT1, 0); 1842 1843 tmp = RREG32(HDP_MISC_CNTL); 1844 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 1845 WREG32(HDP_MISC_CNTL, tmp); 1846 1847 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 1848 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 1849 1850 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 1851 1852 DRM_UDELAY(50); 1853 } 1854 1855 /* 1856 * GPU scratch registers helpers function. 1857 */ 1858 static void si_scratch_init(struct radeon_device *rdev) 1859 { 1860 int i; 1861 1862 rdev->scratch.num_reg = 7; 1863 rdev->scratch.reg_base = SCRATCH_REG0; 1864 for (i = 0; i < rdev->scratch.num_reg; i++) { 1865 rdev->scratch.free[i] = true; 1866 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 1867 } 1868 } 1869 1870 void si_fence_ring_emit(struct radeon_device *rdev, 1871 struct radeon_fence *fence) 1872 { 1873 struct radeon_ring *ring = &rdev->ring[fence->ring]; 1874 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 1875 1876 /* flush read cache over gart */ 1877 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1878 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 1879 radeon_ring_write(ring, 0); 1880 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 1881 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 1882 PACKET3_TC_ACTION_ENA | 1883 PACKET3_SH_KCACHE_ACTION_ENA | 1884 PACKET3_SH_ICACHE_ACTION_ENA); 1885 radeon_ring_write(ring, 0xFFFFFFFF); 1886 radeon_ring_write(ring, 0); 1887 radeon_ring_write(ring, 10); /* poll interval */ 1888 /* EVENT_WRITE_EOP - flush caches, send int */ 1889 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 1890 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); 1891 radeon_ring_write(ring, addr & 0xffffffff); 1892 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 1893 radeon_ring_write(ring, fence->seq); 1894 radeon_ring_write(ring, 0); 1895 } 1896 1897 /* 1898 * IB stuff 1899 */ 1900 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 1901 { 1902 struct radeon_ring *ring = &rdev->ring[ib->ring]; 1903 u32 header; 1904 1905 if (ib->is_const_ib) { 1906 /* set switch buffer packet before const IB */ 1907 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 1908 radeon_ring_write(ring, 0); 1909 1910 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 1911 } else { 1912 u32 next_rptr; 1913 if (ring->rptr_save_reg) { 1914 next_rptr = ring->wptr + 3 + 4 + 8; 1915 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1916 radeon_ring_write(ring, ((ring->rptr_save_reg - 1917 PACKET3_SET_CONFIG_REG_START) >> 2)); 1918 radeon_ring_write(ring, next_rptr); 1919 } else if (rdev->wb.enabled) { 1920 next_rptr = ring->wptr + 5 + 4 + 8; 1921 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 1922 radeon_ring_write(ring, (1 << 8)); 1923 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 1924 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); 1925 radeon_ring_write(ring, next_rptr); 1926 } 1927 1928 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 1929 } 1930 1931 radeon_ring_write(ring, header); 1932 radeon_ring_write(ring, 1933 #ifdef __BIG_ENDIAN 1934 (2 << 0) | 1935 #endif 1936 (ib->gpu_addr & 0xFFFFFFFC)); 1937 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 
0xFFFF); 1938 radeon_ring_write(ring, ib->length_dw | 1939 (ib->vm ? (ib->vm->id << 24) : 0)); 1940 1941 if (!ib->is_const_ib) { 1942 /* flush read cache over gart for this vmid */ 1943 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1944 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 1945 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); 1946 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 1947 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 1948 PACKET3_TC_ACTION_ENA | 1949 PACKET3_SH_KCACHE_ACTION_ENA | 1950 PACKET3_SH_ICACHE_ACTION_ENA); 1951 radeon_ring_write(ring, 0xFFFFFFFF); 1952 radeon_ring_write(ring, 0); 1953 radeon_ring_write(ring, 10); /* poll interval */ 1954 } 1955 } 1956 1957 /* 1958 * CP. 1959 */ 1960 static void si_cp_enable(struct radeon_device *rdev, bool enable) 1961 { 1962 if (enable) 1963 WREG32(CP_ME_CNTL, 0); 1964 else { 1965 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1966 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); 1967 WREG32(SCRATCH_UMSK, 0); 1968 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1969 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 1970 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 1971 } 1972 DRM_UDELAY(50); 1973 } 1974 1975 static int si_cp_load_microcode(struct radeon_device *rdev) 1976 { 1977 const __be32 *fw_data; 1978 int i; 1979 1980 if (!rdev->me_fw || !rdev->pfp_fw) 1981 return -EINVAL; 1982 1983 si_cp_enable(rdev, false); 1984 1985 /* PFP */ 1986 fw_data = (const __be32 *)rdev->pfp_fw->data; 1987 WREG32(CP_PFP_UCODE_ADDR, 0); 1988 for (i = 0; i < SI_PFP_UCODE_SIZE; i++) 1989 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 1990 WREG32(CP_PFP_UCODE_ADDR, 0); 1991 1992 /* CE */ 1993 fw_data = (const __be32 *)rdev->ce_fw->data; 1994 WREG32(CP_CE_UCODE_ADDR, 0); 1995 for (i = 0; i < SI_CE_UCODE_SIZE; i++) 1996 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); 1997 WREG32(CP_CE_UCODE_ADDR, 0); 1998 1999 /* ME */ 2000 fw_data = (const __be32 *)rdev->me_fw->data; 2001 WREG32(CP_ME_RAM_WADDR, 0); 2002 for (i = 0; i < SI_PM4_UCODE_SIZE; i++) 2003 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 2004 WREG32(CP_ME_RAM_WADDR, 0); 2005 2006 WREG32(CP_PFP_UCODE_ADDR, 0); 2007 WREG32(CP_CE_UCODE_ADDR, 0); 2008 WREG32(CP_ME_RAM_WADDR, 0); 2009 WREG32(CP_ME_RAM_RADDR, 0); 2010 return 0; 2011 } 2012 2013 static int si_cp_start(struct radeon_device *rdev) 2014 { 2015 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2016 int r, i; 2017 2018 r = radeon_ring_lock(rdev, ring, 7 + 4); 2019 if (r) { 2020 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2021 return r; 2022 } 2023 /* init the CP */ 2024 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2025 radeon_ring_write(ring, 0x1); 2026 radeon_ring_write(ring, 0x0); 2027 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1); 2028 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2029 radeon_ring_write(ring, 0); 2030 radeon_ring_write(ring, 0); 2031 2032 /* init the CE partitions */ 2033 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 2034 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 2035 radeon_ring_write(ring, 0xc000); 2036 radeon_ring_write(ring, 0xe000); 2037 radeon_ring_unlock_commit(rdev, ring); 2038 2039 si_cp_enable(rdev, true); 2040 2041 r = radeon_ring_lock(rdev, ring, si_default_size + 10); 2042 if (r) { 2043 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2044 return r; 2045 } 2046 2047 /* 
setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < si_default_size; i++)
		radeon_ring_write(ring, si_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
		ring = &rdev->ring[i];
		r = radeon_ring_lock(rdev, ring, 2);
		if (r) {
			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
			return r;
		}

		/* clear the compute context state */
		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
		radeon_ring_write(ring, 0);

		radeon_ring_unlock_commit(rdev, ring);
	}

	return 0;
}

static void si_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	si_cp_enable(rdev, false);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	DRM_MDELAY(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled) {
		WREG32(SCRATCH_UMSK, 0xff);
	} else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	DRM_MDELAY(1);
	WREG32(CP_RB0_CNTL, tmp);
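	/* the CP ring base registers take a 256-byte aligned address,
	 * hence the shift by 8 on the GPU address below
	 */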
WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); 2157 2158 ring->rptr = RREG32(CP_RB0_RPTR); 2159 2160 /* ring1 - compute only */ 2161 /* Set ring buffer size */ 2162 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 2163 rb_bufsz = drm_order(ring->ring_size / 8); 2164 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2165 #ifdef __BIG_ENDIAN 2166 tmp |= BUF_SWAP_32BIT; 2167 #endif 2168 WREG32(CP_RB1_CNTL, tmp); 2169 2170 /* Initialize the ring buffer's read and write pointers */ 2171 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 2172 ring->wptr = 0; 2173 WREG32(CP_RB1_WPTR, ring->wptr); 2174 2175 /* set the wb address whether it's enabled or not */ 2176 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); 2177 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); 2178 2179 DRM_MDELAY(1); 2180 WREG32(CP_RB1_CNTL, tmp); 2181 2182 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); 2183 2184 ring->rptr = RREG32(CP_RB1_RPTR); 2185 2186 /* ring2 - compute only */ 2187 /* Set ring buffer size */ 2188 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 2189 rb_bufsz = drm_order(ring->ring_size / 8); 2190 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2191 #ifdef __BIG_ENDIAN 2192 tmp |= BUF_SWAP_32BIT; 2193 #endif 2194 WREG32(CP_RB2_CNTL, tmp); 2195 2196 /* Initialize the ring buffer's read and write pointers */ 2197 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 2198 ring->wptr = 0; 2199 WREG32(CP_RB2_WPTR, ring->wptr); 2200 2201 /* set the wb address whether it's enabled or not */ 2202 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); 2203 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); 2204 2205 DRM_MDELAY(1); 2206 WREG32(CP_RB2_CNTL, tmp); 2207 2208 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); 2209 2210 ring->rptr = RREG32(CP_RB2_RPTR); 2211 2212 /* start the rings */ 2213 si_cp_start(rdev); 2214 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; 2215 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true; 2216 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true; 2217 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 2218 if (r) { 2219 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2220 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 2221 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 2222 return r; 2223 } 2224 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); 2225 if (r) { 2226 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 2227 } 2228 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); 2229 if (r) { 2230 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 2231 } 2232 2233 return 0; 2234 } 2235 2236 static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) 2237 { 2238 u32 reset_mask = 0; 2239 u32 tmp; 2240 2241 /* GRBM_STATUS */ 2242 tmp = RREG32(GRBM_STATUS); 2243 if (tmp & (PA_BUSY | SC_BUSY | 2244 BCI_BUSY | SX_BUSY | 2245 TA_BUSY | VGT_BUSY | 2246 DB_BUSY | CB_BUSY | 2247 GDS_BUSY | SPI_BUSY | 2248 IA_BUSY | IA_BUSY_NO_DMA)) 2249 reset_mask |= RADEON_RESET_GFX; 2250 2251 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING | 2252 CP_BUSY | CP_COHERENCY_BUSY)) 2253 reset_mask |= RADEON_RESET_CP; 2254 2255 if (tmp & GRBM_EE_BUSY) 2256 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP; 2257 2258 /* GRBM_STATUS2 */ 2259 tmp = RREG32(GRBM_STATUS2); 2260 if (tmp & 
(RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	DRM_UDELAY(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
				  SOFT_RESET_DB |
				  SOFT_RESET_GDS |
				  SOFT_RESET_PA |
				  SOFT_RESET_SC |
				  SOFT_RESET_BCI |
				  SOFT_RESET_SPI |
				  SOFT_RESET_SX |
				  SOFT_RESET_TC |
				  SOFT_RESET_TA |
				  SOFT_RESET_VGT |
				  SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
srbm_soft_reset |= SOFT_RESET_DC; 2386 2387 if (reset_mask & RADEON_RESET_RLC) 2388 grbm_soft_reset |= SOFT_RESET_RLC; 2389 2390 if (reset_mask & RADEON_RESET_SEM) 2391 srbm_soft_reset |= SOFT_RESET_SEM; 2392 2393 if (reset_mask & RADEON_RESET_IH) 2394 srbm_soft_reset |= SOFT_RESET_IH; 2395 2396 if (reset_mask & RADEON_RESET_GRBM) 2397 srbm_soft_reset |= SOFT_RESET_GRBM; 2398 2399 if (reset_mask & RADEON_RESET_VMC) 2400 srbm_soft_reset |= SOFT_RESET_VMC; 2401 2402 if (reset_mask & RADEON_RESET_MC) 2403 srbm_soft_reset |= SOFT_RESET_MC; 2404 2405 if (grbm_soft_reset) { 2406 tmp = RREG32(GRBM_SOFT_RESET); 2407 tmp |= grbm_soft_reset; 2408 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 2409 WREG32(GRBM_SOFT_RESET, tmp); 2410 tmp = RREG32(GRBM_SOFT_RESET); 2411 2412 DRM_UDELAY(50); 2413 2414 tmp &= ~grbm_soft_reset; 2415 WREG32(GRBM_SOFT_RESET, tmp); 2416 tmp = RREG32(GRBM_SOFT_RESET); 2417 } 2418 2419 if (srbm_soft_reset) { 2420 tmp = RREG32(SRBM_SOFT_RESET); 2421 tmp |= srbm_soft_reset; 2422 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 2423 WREG32(SRBM_SOFT_RESET, tmp); 2424 tmp = RREG32(SRBM_SOFT_RESET); 2425 2426 DRM_UDELAY(50); 2427 2428 tmp &= ~srbm_soft_reset; 2429 WREG32(SRBM_SOFT_RESET, tmp); 2430 tmp = RREG32(SRBM_SOFT_RESET); 2431 } 2432 2433 /* Wait a little for things to settle down */ 2434 DRM_UDELAY(50); 2435 2436 evergreen_mc_resume(rdev, &save); 2437 DRM_UDELAY(50); 2438 2439 evergreen_print_gpu_status_regs(rdev); 2440 } 2441 2442 int si_asic_reset(struct radeon_device *rdev) 2443 { 2444 u32 reset_mask; 2445 2446 reset_mask = si_gpu_check_soft_reset(rdev); 2447 2448 if (reset_mask) 2449 r600_set_bios_scratch_engine_hung(rdev, true); 2450 2451 si_gpu_soft_reset(rdev, reset_mask); 2452 2453 reset_mask = si_gpu_check_soft_reset(rdev); 2454 2455 if (!reset_mask) 2456 r600_set_bios_scratch_engine_hung(rdev, false); 2457 2458 return 0; 2459 } 2460 2461 /** 2462 * si_gfx_is_lockup - Check if the GFX engine is locked up 2463 * 2464 * @rdev: radeon_device pointer 2465 * @ring: radeon_ring structure holding ring information 2466 * 2467 * Check if the GFX engine is locked up. 2468 * Returns true if the engine appears to be locked up, false if not. 2469 */ 2470 bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2471 { 2472 u32 reset_mask = si_gpu_check_soft_reset(rdev); 2473 2474 if (!(reset_mask & (RADEON_RESET_GFX | 2475 RADEON_RESET_COMPUTE | 2476 RADEON_RESET_CP))) { 2477 radeon_ring_lockup_update(ring); 2478 return false; 2479 } 2480 /* force CP activities */ 2481 radeon_ring_force_activity(rdev, ring); 2482 return radeon_ring_test_lockup(rdev, ring); 2483 } 2484 2485 /** 2486 * si_dma_is_lockup - Check if the DMA engine is locked up 2487 * 2488 * @rdev: radeon_device pointer 2489 * @ring: radeon_ring structure holding ring information 2490 * 2491 * Check if the async DMA engine is locked up. 2492 * Returns true if the engine appears to be locked up, false if not. 
2493 */ 2494 bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2495 { 2496 u32 reset_mask = si_gpu_check_soft_reset(rdev); 2497 u32 mask; 2498 2499 if (ring->idx == R600_RING_TYPE_DMA_INDEX) 2500 mask = RADEON_RESET_DMA; 2501 else 2502 mask = RADEON_RESET_DMA1; 2503 2504 if (!(reset_mask & mask)) { 2505 radeon_ring_lockup_update(ring); 2506 return false; 2507 } 2508 /* force ring activities */ 2509 radeon_ring_force_activity(rdev, ring); 2510 return radeon_ring_test_lockup(rdev, ring); 2511 } 2512 2513 /* MC */ 2514 static void si_mc_program(struct radeon_device *rdev) 2515 { 2516 struct evergreen_mc_save save; 2517 u32 tmp; 2518 int i, j; 2519 2520 /* Initialize HDP */ 2521 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 2522 WREG32((0x2c14 + j), 0x00000000); 2523 WREG32((0x2c18 + j), 0x00000000); 2524 WREG32((0x2c1c + j), 0x00000000); 2525 WREG32((0x2c20 + j), 0x00000000); 2526 WREG32((0x2c24 + j), 0x00000000); 2527 } 2528 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 2529 2530 evergreen_mc_stop(rdev, &save); 2531 if (radeon_mc_wait_for_idle(rdev)) { 2532 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 2533 } 2534 /* Lockout access through VGA aperture*/ 2535 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 2536 /* Update configuration */ 2537 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 2538 rdev->mc.vram_start >> 12); 2539 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 2540 rdev->mc.vram_end >> 12); 2541 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 2542 rdev->vram_scratch.gpu_addr >> 12); 2543 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 2544 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 2545 WREG32(MC_VM_FB_LOCATION, tmp); 2546 /* XXX double check these! */ 2547 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 2548 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); 2549 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 2550 WREG32(MC_VM_AGP_BASE, 0); 2551 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 2552 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 2553 if (radeon_mc_wait_for_idle(rdev)) { 2554 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 2555 } 2556 evergreen_mc_resume(rdev, &save); 2557 /* we need to own VRAM, so turn off the VGA renderer here 2558 * to stop it overwriting our objects */ 2559 rv515_vga_render_disable(rdev); 2560 } 2561 2562 /* SI MC address space is 40 bits */ 2563 static void si_vram_location(struct radeon_device *rdev, 2564 struct radeon_mc *mc, u64 base) 2565 { 2566 mc->vram_start = base; 2567 if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) { 2568 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 2569 mc->real_vram_size = mc->aper_size; 2570 mc->mc_vram_size = mc->aper_size; 2571 } 2572 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 2573 dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n", 2574 (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, 2575 (uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20); 2576 } 2577 2578 static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 2579 { 2580 u64 size_af, size_bf; 2581 2582 size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 2583 size_bf = mc->vram_start & ~mc->gtt_base_align; 2584 if (size_bf > size_af) { 2585 if (mc->gtt_size > size_bf) { 2586 dev_warn(rdev->dev, "limiting GTT\n"); 2587 mc->gtt_size = size_bf; 2588 } 2589 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; 2590 } else { 2591 if (mc->gtt_size > size_af) { 2592 dev_warn(rdev->dev, "limiting GTT\n"); 2593 mc->gtt_size = 
size_af; 2594 } 2595 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; 2596 } 2597 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 2598 dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n", 2599 (uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end); 2600 } 2601 2602 static void si_vram_gtt_location(struct radeon_device *rdev, 2603 struct radeon_mc *mc) 2604 { 2605 if (mc->mc_vram_size > 0xFFC0000000ULL) { 2606 /* leave room for at least 1024M GTT */ 2607 dev_warn(rdev->dev, "limiting VRAM\n"); 2608 mc->real_vram_size = 0xFFC0000000ULL; 2609 mc->mc_vram_size = 0xFFC0000000ULL; 2610 } 2611 si_vram_location(rdev, &rdev->mc, 0); 2612 rdev->mc.gtt_base_align = 0; 2613 si_gtt_location(rdev, mc); 2614 } 2615 2616 static int si_mc_init(struct radeon_device *rdev) 2617 { 2618 u32 tmp; 2619 int chansize, numchan; 2620 2621 /* Get VRAM informations */ 2622 rdev->mc.vram_is_ddr = true; 2623 tmp = RREG32(MC_ARB_RAMCFG); 2624 if (tmp & CHANSIZE_OVERRIDE) { 2625 chansize = 16; 2626 } else if (tmp & CHANSIZE_MASK) { 2627 chansize = 64; 2628 } else { 2629 chansize = 32; 2630 } 2631 tmp = RREG32(MC_SHARED_CHMAP); 2632 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 2633 case 0: 2634 default: 2635 numchan = 1; 2636 break; 2637 case 1: 2638 numchan = 2; 2639 break; 2640 case 2: 2641 numchan = 4; 2642 break; 2643 case 3: 2644 numchan = 8; 2645 break; 2646 case 4: 2647 numchan = 3; 2648 break; 2649 case 5: 2650 numchan = 6; 2651 break; 2652 case 6: 2653 numchan = 10; 2654 break; 2655 case 7: 2656 numchan = 12; 2657 break; 2658 case 8: 2659 numchan = 16; 2660 break; 2661 } 2662 rdev->mc.vram_width = numchan * chansize; 2663 /* Could aper size report 0 ? */ 2664 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 2665 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 2666 /* size in MB on si */ 2667 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2668 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2669 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2670 si_vram_gtt_location(rdev, &rdev->mc); 2671 radeon_update_bandwidth_info(rdev); 2672 2673 return 0; 2674 } 2675 2676 /* 2677 * GART 2678 */ 2679 void si_pcie_gart_tlb_flush(struct radeon_device *rdev) 2680 { 2681 /* flush hdp cache */ 2682 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2683 2684 /* bits 0-15 are the VM contexts0-15 */ 2685 WREG32(VM_INVALIDATE_REQUEST, 1); 2686 } 2687 2688 static int si_pcie_gart_enable(struct radeon_device *rdev) 2689 { 2690 int r, i; 2691 2692 if (rdev->gart.robj == NULL) { 2693 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 2694 return -EINVAL; 2695 } 2696 r = radeon_gart_table_vram_pin(rdev); 2697 if (r) 2698 return r; 2699 radeon_gart_restore(rdev); 2700 /* Setup TLB control */ 2701 WREG32(MC_VM_MX_L1_TLB_CNTL, 2702 (0xA << 7) | 2703 ENABLE_L1_TLB | 2704 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 2705 ENABLE_ADVANCED_DRIVER_MODEL | 2706 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 2707 /* Setup L2 cache */ 2708 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 2709 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 2710 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 2711 EFFECTIVE_L2_QUEUE_SIZE(7) | 2712 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 2713 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 2714 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 2715 L2_CACHE_BIGK_FRAGMENT_SIZE(0)); 2716 /* setup context0 */ 2717 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 2718 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, 
rdev->mc.gtt_end >> 12); 2719 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 2720 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 2721 (u32)(rdev->dummy_page.addr >> 12)); 2722 WREG32(VM_CONTEXT0_CNTL2, 0); 2723 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 2724 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); 2725 2726 WREG32(0x15D4, 0); 2727 WREG32(0x15D8, 0); 2728 WREG32(0x15DC, 0); 2729 2730 /* empty context1-15 */ 2731 /* set vm size, must be a multiple of 4 */ 2732 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 2733 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 2734 /* Assign the pt base to something valid for now; the pts used for 2735 * the VMs are determined by the application and setup and assigned 2736 * on the fly in the vm part of radeon_gart.c 2737 */ 2738 for (i = 1; i < 16; i++) { 2739 if (i < 8) 2740 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 2741 rdev->gart.table_addr >> 12); 2742 else 2743 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), 2744 rdev->gart.table_addr >> 12); 2745 } 2746 2747 /* enable context1-15 */ 2748 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 2749 (u32)(rdev->dummy_page.addr >> 12)); 2750 WREG32(VM_CONTEXT1_CNTL2, 4); 2751 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 2752 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 2753 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 2754 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 2755 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | 2756 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | 2757 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | 2758 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | 2759 VALID_PROTECTION_FAULT_ENABLE_DEFAULT | 2760 READ_PROTECTION_FAULT_ENABLE_INTERRUPT | 2761 READ_PROTECTION_FAULT_ENABLE_DEFAULT | 2762 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | 2763 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); 2764 2765 si_pcie_gart_tlb_flush(rdev); 2766 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 2767 (unsigned)(rdev->mc.gtt_size >> 20), 2768 (unsigned long long)rdev->gart.table_addr); 2769 rdev->gart.ready = true; 2770 return 0; 2771 } 2772 2773 static void si_pcie_gart_disable(struct radeon_device *rdev) 2774 { 2775 /* Disable all tables */ 2776 WREG32(VM_CONTEXT0_CNTL, 0); 2777 WREG32(VM_CONTEXT1_CNTL, 0); 2778 /* Setup TLB control */ 2779 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | 2780 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 2781 /* Setup L2 cache */ 2782 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 2783 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 2784 EFFECTIVE_L2_QUEUE_SIZE(7) | 2785 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 2786 WREG32(VM_L2_CNTL2, 0); 2787 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 2788 L2_CACHE_BIGK_FRAGMENT_SIZE(0)); 2789 radeon_gart_table_vram_unpin(rdev); 2790 } 2791 2792 static void si_pcie_gart_fini(struct radeon_device *rdev) 2793 { 2794 si_pcie_gart_disable(rdev); 2795 radeon_gart_table_vram_free(rdev); 2796 radeon_gart_fini(rdev); 2797 } 2798 2799 /* vm parser */ 2800 static bool si_vm_reg_valid(u32 reg) 2801 { 2802 /* context regs are fine */ 2803 if (reg >= 0x28000) 2804 return true; 2805 2806 /* check config regs */ 2807 switch (reg) { 2808 case GRBM_GFX_INDEX: 2809 case CP_STRMOUT_CNTL: 2810 case VGT_VTX_VECT_EJECT_REG: 2811 case VGT_CACHE_INVALIDATION: 2812 case VGT_ESGS_RING_SIZE: 2813 case VGT_GSVS_RING_SIZE: 2814 case VGT_GS_VERTEX_REUSE: 2815 case VGT_PRIMITIVE_TYPE: 2816 case VGT_INDEX_TYPE: 2817 case VGT_NUM_INDICES: 2818 case VGT_NUM_INSTANCES: 2819 
case VGT_TF_RING_SIZE: 2820 case VGT_HS_OFFCHIP_PARAM: 2821 case VGT_TF_MEMORY_BASE: 2822 case PA_CL_ENHANCE: 2823 case PA_SU_LINE_STIPPLE_VALUE: 2824 case PA_SC_LINE_STIPPLE_STATE: 2825 case PA_SC_ENHANCE: 2826 case SQC_CACHES: 2827 case SPI_STATIC_THREAD_MGMT_1: 2828 case SPI_STATIC_THREAD_MGMT_2: 2829 case SPI_STATIC_THREAD_MGMT_3: 2830 case SPI_PS_MAX_WAVE_ID: 2831 case SPI_CONFIG_CNTL: 2832 case SPI_CONFIG_CNTL_1: 2833 case TA_CNTL_AUX: 2834 return true; 2835 default: 2836 DRM_ERROR("Invalid register 0x%x in CS\n", reg); 2837 return false; 2838 } 2839 } 2840 2841 static int si_vm_packet3_ce_check(struct radeon_device *rdev, 2842 u32 *ib, struct radeon_cs_packet *pkt) 2843 { 2844 switch (pkt->opcode) { 2845 case PACKET3_NOP: 2846 case PACKET3_SET_BASE: 2847 case PACKET3_SET_CE_DE_COUNTERS: 2848 case PACKET3_LOAD_CONST_RAM: 2849 case PACKET3_WRITE_CONST_RAM: 2850 case PACKET3_WRITE_CONST_RAM_OFFSET: 2851 case PACKET3_DUMP_CONST_RAM: 2852 case PACKET3_INCREMENT_CE_COUNTER: 2853 case PACKET3_WAIT_ON_DE_COUNTER: 2854 case PACKET3_CE_WRITE: 2855 break; 2856 default: 2857 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode); 2858 return -EINVAL; 2859 } 2860 return 0; 2861 } 2862 2863 static int si_vm_packet3_gfx_check(struct radeon_device *rdev, 2864 u32 *ib, struct radeon_cs_packet *pkt) 2865 { 2866 u32 idx = pkt->idx + 1; 2867 u32 idx_value = ib[idx]; 2868 u32 start_reg, end_reg, reg, i; 2869 u32 command, info; 2870 2871 switch (pkt->opcode) { 2872 case PACKET3_NOP: 2873 case PACKET3_SET_BASE: 2874 case PACKET3_CLEAR_STATE: 2875 case PACKET3_INDEX_BUFFER_SIZE: 2876 case PACKET3_DISPATCH_DIRECT: 2877 case PACKET3_DISPATCH_INDIRECT: 2878 case PACKET3_ALLOC_GDS: 2879 case PACKET3_WRITE_GDS_RAM: 2880 case PACKET3_ATOMIC_GDS: 2881 case PACKET3_ATOMIC: 2882 case PACKET3_OCCLUSION_QUERY: 2883 case PACKET3_SET_PREDICATION: 2884 case PACKET3_COND_EXEC: 2885 case PACKET3_PRED_EXEC: 2886 case PACKET3_DRAW_INDIRECT: 2887 case PACKET3_DRAW_INDEX_INDIRECT: 2888 case PACKET3_INDEX_BASE: 2889 case PACKET3_DRAW_INDEX_2: 2890 case PACKET3_CONTEXT_CONTROL: 2891 case PACKET3_INDEX_TYPE: 2892 case PACKET3_DRAW_INDIRECT_MULTI: 2893 case PACKET3_DRAW_INDEX_AUTO: 2894 case PACKET3_DRAW_INDEX_IMMD: 2895 case PACKET3_NUM_INSTANCES: 2896 case PACKET3_DRAW_INDEX_MULTI_AUTO: 2897 case PACKET3_STRMOUT_BUFFER_UPDATE: 2898 case PACKET3_DRAW_INDEX_OFFSET_2: 2899 case PACKET3_DRAW_INDEX_MULTI_ELEMENT: 2900 case PACKET3_DRAW_INDEX_INDIRECT_MULTI: 2901 case PACKET3_MPEG_INDEX: 2902 case PACKET3_WAIT_REG_MEM: 2903 case PACKET3_MEM_WRITE: 2904 case PACKET3_PFP_SYNC_ME: 2905 case PACKET3_SURFACE_SYNC: 2906 case PACKET3_EVENT_WRITE: 2907 case PACKET3_EVENT_WRITE_EOP: 2908 case PACKET3_EVENT_WRITE_EOS: 2909 case PACKET3_SET_CONTEXT_REG: 2910 case PACKET3_SET_CONTEXT_REG_INDIRECT: 2911 case PACKET3_SET_SH_REG: 2912 case PACKET3_SET_SH_REG_OFFSET: 2913 case PACKET3_INCREMENT_DE_COUNTER: 2914 case PACKET3_WAIT_ON_CE_COUNTER: 2915 case PACKET3_WAIT_ON_AVAIL_BUFFER: 2916 case PACKET3_ME_WRITE: 2917 break; 2918 case PACKET3_COPY_DATA: 2919 if ((idx_value & 0xf00) == 0) { 2920 reg = ib[idx + 3] * 4; 2921 if (!si_vm_reg_valid(reg)) 2922 return -EINVAL; 2923 } 2924 break; 2925 case PACKET3_WRITE_DATA: 2926 if ((idx_value & 0xf00) == 0) { 2927 start_reg = ib[idx + 1] * 4; 2928 if (idx_value & 0x10000) { 2929 if (!si_vm_reg_valid(start_reg)) 2930 return -EINVAL; 2931 } else { 2932 for (i = 0; i < (pkt->count - 2); i++) { 2933 reg = start_reg + (4 * i); 2934 if (!si_vm_reg_valid(reg)) 2935 return -EINVAL; 2936 } 2937 } 2938 } 2939 break; 
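	/* like WRITE_DATA above, the packets below can name registers as
	 * their destination; every register touched must pass the
	 * si_vm_reg_valid() whitelist
	 */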
2940 case PACKET3_COND_WRITE: 2941 if (idx_value & 0x100) { 2942 reg = ib[idx + 5] * 4; 2943 if (!si_vm_reg_valid(reg)) 2944 return -EINVAL; 2945 } 2946 break; 2947 case PACKET3_COPY_DW: 2948 if (idx_value & 0x2) { 2949 reg = ib[idx + 3] * 4; 2950 if (!si_vm_reg_valid(reg)) 2951 return -EINVAL; 2952 } 2953 break; 2954 case PACKET3_SET_CONFIG_REG: 2955 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; 2956 end_reg = 4 * pkt->count + start_reg - 4; 2957 if ((start_reg < PACKET3_SET_CONFIG_REG_START) || 2958 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 2959 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 2960 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 2961 return -EINVAL; 2962 } 2963 for (i = 0; i < pkt->count; i++) { 2964 reg = start_reg + (4 * i); 2965 if (!si_vm_reg_valid(reg)) 2966 return -EINVAL; 2967 } 2968 break; 2969 case PACKET3_CP_DMA: 2970 command = ib[idx + 4]; 2971 info = ib[idx + 1]; 2972 if (command & PACKET3_CP_DMA_CMD_SAS) { 2973 /* src address space is register */ 2974 if (((info & 0x60000000) >> 29) == 0) { 2975 start_reg = idx_value << 2; 2976 if (command & PACKET3_CP_DMA_CMD_SAIC) { 2977 reg = start_reg; 2978 if (!si_vm_reg_valid(reg)) { 2979 DRM_ERROR("CP DMA Bad SRC register\n"); 2980 return -EINVAL; 2981 } 2982 } else { 2983 for (i = 0; i < (command & 0x1fffff); i++) { 2984 reg = start_reg + (4 * i); 2985 if (!si_vm_reg_valid(reg)) { 2986 DRM_ERROR("CP DMA Bad SRC register\n"); 2987 return -EINVAL; 2988 } 2989 } 2990 } 2991 } 2992 } 2993 if (command & PACKET3_CP_DMA_CMD_DAS) { 2994 /* dst address space is register */ 2995 if (((info & 0x00300000) >> 20) == 0) { 2996 start_reg = ib[idx + 2]; 2997 if (command & PACKET3_CP_DMA_CMD_DAIC) { 2998 reg = start_reg; 2999 if (!si_vm_reg_valid(reg)) { 3000 DRM_ERROR("CP DMA Bad DST register\n"); 3001 return -EINVAL; 3002 } 3003 } else { 3004 for (i = 0; i < (command & 0x1fffff); i++) { 3005 reg = start_reg + (4 * i); 3006 if (!si_vm_reg_valid(reg)) { 3007 DRM_ERROR("CP DMA Bad DST register\n"); 3008 return -EINVAL; 3009 } 3010 } 3011 } 3012 } 3013 } 3014 break; 3015 default: 3016 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); 3017 return -EINVAL; 3018 } 3019 return 0; 3020 } 3021 3022 static int si_vm_packet3_compute_check(struct radeon_device *rdev, 3023 u32 *ib, struct radeon_cs_packet *pkt) 3024 { 3025 u32 idx = pkt->idx + 1; 3026 u32 idx_value = ib[idx]; 3027 u32 start_reg, reg, i; 3028 3029 switch (pkt->opcode) { 3030 case PACKET3_NOP: 3031 case PACKET3_SET_BASE: 3032 case PACKET3_CLEAR_STATE: 3033 case PACKET3_DISPATCH_DIRECT: 3034 case PACKET3_DISPATCH_INDIRECT: 3035 case PACKET3_ALLOC_GDS: 3036 case PACKET3_WRITE_GDS_RAM: 3037 case PACKET3_ATOMIC_GDS: 3038 case PACKET3_ATOMIC: 3039 case PACKET3_OCCLUSION_QUERY: 3040 case PACKET3_SET_PREDICATION: 3041 case PACKET3_COND_EXEC: 3042 case PACKET3_PRED_EXEC: 3043 case PACKET3_CONTEXT_CONTROL: 3044 case PACKET3_STRMOUT_BUFFER_UPDATE: 3045 case PACKET3_WAIT_REG_MEM: 3046 case PACKET3_MEM_WRITE: 3047 case PACKET3_PFP_SYNC_ME: 3048 case PACKET3_SURFACE_SYNC: 3049 case PACKET3_EVENT_WRITE: 3050 case PACKET3_EVENT_WRITE_EOP: 3051 case PACKET3_EVENT_WRITE_EOS: 3052 case PACKET3_SET_CONTEXT_REG: 3053 case PACKET3_SET_CONTEXT_REG_INDIRECT: 3054 case PACKET3_SET_SH_REG: 3055 case PACKET3_SET_SH_REG_OFFSET: 3056 case PACKET3_INCREMENT_DE_COUNTER: 3057 case PACKET3_WAIT_ON_CE_COUNTER: 3058 case PACKET3_WAIT_ON_AVAIL_BUFFER: 3059 case PACKET3_ME_WRITE: 3060 break; 3061 case PACKET3_COPY_DATA: 3062 if ((idx_value & 0xf00) == 0) { 3063 reg = ib[idx + 3] * 4; 3064 if 
(!si_vm_reg_valid(reg)) 3065 return -EINVAL; 3066 } 3067 break; 3068 case PACKET3_WRITE_DATA: 3069 if ((idx_value & 0xf00) == 0) { 3070 start_reg = ib[idx + 1] * 4; 3071 if (idx_value & 0x10000) { 3072 if (!si_vm_reg_valid(start_reg)) 3073 return -EINVAL; 3074 } else { 3075 for (i = 0; i < (pkt->count - 2); i++) { 3076 reg = start_reg + (4 * i); 3077 if (!si_vm_reg_valid(reg)) 3078 return -EINVAL; 3079 } 3080 } 3081 } 3082 break; 3083 case PACKET3_COND_WRITE: 3084 if (idx_value & 0x100) { 3085 reg = ib[idx + 5] * 4; 3086 if (!si_vm_reg_valid(reg)) 3087 return -EINVAL; 3088 } 3089 break; 3090 case PACKET3_COPY_DW: 3091 if (idx_value & 0x2) { 3092 reg = ib[idx + 3] * 4; 3093 if (!si_vm_reg_valid(reg)) 3094 return -EINVAL; 3095 } 3096 break; 3097 default: 3098 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); 3099 return -EINVAL; 3100 } 3101 return 0; 3102 } 3103 3104 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) 3105 { 3106 int ret = 0; 3107 u32 idx = 0; 3108 struct radeon_cs_packet pkt; 3109 3110 do { 3111 pkt.idx = idx; 3112 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]); 3113 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]); 3114 pkt.one_reg_wr = 0; 3115 switch (pkt.type) { 3116 case RADEON_PACKET_TYPE0: 3117 dev_err(rdev->dev, "Packet0 not allowed!\n"); 3118 ret = -EINVAL; 3119 break; 3120 case RADEON_PACKET_TYPE2: 3121 idx += 1; 3122 break; 3123 case RADEON_PACKET_TYPE3: 3124 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]); 3125 if (ib->is_const_ib) 3126 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); 3127 else { 3128 switch (ib->ring) { 3129 case RADEON_RING_TYPE_GFX_INDEX: 3130 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); 3131 break; 3132 case CAYMAN_RING_TYPE_CP1_INDEX: 3133 case CAYMAN_RING_TYPE_CP2_INDEX: 3134 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); 3135 break; 3136 default: 3137 dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring); 3138 ret = -EINVAL; 3139 break; 3140 } 3141 } 3142 idx += pkt.count + 2; 3143 break; 3144 default: 3145 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type); 3146 ret = -EINVAL; 3147 break; 3148 } 3149 if (ret) 3150 break; 3151 } while (idx < ib->length_dw); 3152 3153 return ret; 3154 } 3155 3156 /* 3157 * vm 3158 */ 3159 int si_vm_init(struct radeon_device *rdev) 3160 { 3161 /* number of VMs */ 3162 rdev->vm_manager.nvm = 16; 3163 /* base offset of vram pages */ 3164 rdev->vm_manager.vram_base_offset = 0; 3165 3166 return 0; 3167 } 3168 3169 void si_vm_fini(struct radeon_device *rdev) 3170 { 3171 } 3172 3173 /** 3174 * si_vm_set_page - update the page tables using the CP 3175 * 3176 * @rdev: radeon_device pointer 3177 * @ib: indirect buffer to fill with commands 3178 * @pe: addr of the page entry 3179 * @addr: dst addr to write into pe 3180 * @count: number of page entries to update 3181 * @incr: increase next addr by incr bytes 3182 * @flags: access flags 3183 * 3184 * Update the page tables using the CP (SI). 
3185 */ 3186 void si_vm_set_page(struct radeon_device *rdev, 3187 struct radeon_ib *ib, 3188 uint64_t pe, 3189 uint64_t addr, unsigned count, 3190 uint32_t incr, uint32_t flags) 3191 { 3192 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 3193 uint64_t value; 3194 unsigned ndw; 3195 3196 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { 3197 while (count) { 3198 ndw = 2 + count * 2; 3199 if (ndw > 0x3FFE) 3200 ndw = 0x3FFE; 3201 3202 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); 3203 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | 3204 WRITE_DATA_DST_SEL(1)); 3205 ib->ptr[ib->length_dw++] = pe; 3206 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 3207 for (; ndw > 2; ndw -= 2, --count, pe += 8) { 3208 if (flags & RADEON_VM_PAGE_SYSTEM) { 3209 value = radeon_vm_map_gart(rdev, addr); 3210 value &= 0xFFFFFFFFFFFFF000ULL; 3211 } else if (flags & RADEON_VM_PAGE_VALID) { 3212 value = addr; 3213 } else { 3214 value = 0; 3215 } 3216 addr += incr; 3217 value |= r600_flags; 3218 ib->ptr[ib->length_dw++] = value; 3219 ib->ptr[ib->length_dw++] = upper_32_bits(value); 3220 } 3221 } 3222 } else { 3223 /* DMA */ 3224 if (flags & RADEON_VM_PAGE_SYSTEM) { 3225 while (count) { 3226 ndw = count * 2; 3227 if (ndw > 0xFFFFE) 3228 ndw = 0xFFFFE; 3229 3230 /* for non-physically contiguous pages (system) */ 3231 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); 3232 ib->ptr[ib->length_dw++] = pe; 3233 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 3234 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 3235 if (flags & RADEON_VM_PAGE_SYSTEM) { 3236 value = radeon_vm_map_gart(rdev, addr); 3237 value &= 0xFFFFFFFFFFFFF000ULL; 3238 } else if (flags & RADEON_VM_PAGE_VALID) { 3239 value = addr; 3240 } else { 3241 value = 0; 3242 } 3243 addr += incr; 3244 value |= r600_flags; 3245 ib->ptr[ib->length_dw++] = value; 3246 ib->ptr[ib->length_dw++] = upper_32_bits(value); 3247 } 3248 } 3249 } else { 3250 while (count) { 3251 ndw = count * 2; 3252 if (ndw > 0xFFFFE) 3253 ndw = 0xFFFFE; 3254 3255 if (flags & RADEON_VM_PAGE_VALID) 3256 value = addr; 3257 else 3258 value = 0; 3259 /* for physically contiguous pages (vram) */ 3260 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 3261 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 3262 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 3263 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 3264 ib->ptr[ib->length_dw++] = 0; 3265 ib->ptr[ib->length_dw++] = value; /* value */ 3266 ib->ptr[ib->length_dw++] = upper_32_bits(value); 3267 ib->ptr[ib->length_dw++] = incr; /* increment size */ 3268 ib->ptr[ib->length_dw++] = 0; 3269 pe += ndw * 4; 3270 addr += (ndw / 2) * incr; 3271 count -= ndw / 2; 3272 } 3273 } 3274 while (ib->length_dw & 0x7) 3275 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); 3276 } 3277 } 3278 3279 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 3280 { 3281 struct radeon_ring *ring = &rdev->ring[ridx]; 3282 3283 if (vm == NULL) 3284 return; 3285 3286 /* write new base address */ 3287 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3288 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3289 WRITE_DATA_DST_SEL(0))); 3290 3291 if (vm->id < 8) { 3292 radeon_ring_write(ring, 3293 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 3294 } else { 3295 radeon_ring_write(ring, 3296 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 3297 } 3298 radeon_ring_write(ring, 0); 3299 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 3300 
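	/* the register writes above are emitted on the ring, so they are
	 * ordered with the HDP flush and TLB invalidate that follow
	 */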
3301 /* flush hdp cache */ 3302 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3303 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3304 WRITE_DATA_DST_SEL(0))); 3305 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); 3306 radeon_ring_write(ring, 0); 3307 radeon_ring_write(ring, 0x1); 3308 3309 /* bits 0-15 are the VM contexts0-15 */ 3310 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3311 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3312 WRITE_DATA_DST_SEL(0))); 3313 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 3314 radeon_ring_write(ring, 0); 3315 radeon_ring_write(ring, 1 << vm->id); 3316 3317 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3318 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3319 radeon_ring_write(ring, 0x0); 3320 } 3321 3322 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 3323 { 3324 struct radeon_ring *ring = &rdev->ring[ridx]; 3325 3326 if (vm == NULL) 3327 return; 3328 3329 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 3330 if (vm->id < 8) { 3331 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 3332 } else { 3333 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); 3334 } 3335 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 3336 3337 /* flush hdp cache */ 3338 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 3339 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); 3340 radeon_ring_write(ring, 1); 3341 3342 /* bits 0-7 are the VM contexts0-7 */ 3343 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 3344 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); 3345 radeon_ring_write(ring, 1 << vm->id); 3346 } 3347 3348 /* 3349 * RLC 3350 */ 3351 void si_rlc_fini(struct radeon_device *rdev) 3352 { 3353 int r; 3354 3355 /* save restore block */ 3356 if (rdev->rlc.save_restore_obj) { 3357 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 3358 if (unlikely(r != 0)) 3359 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); 3360 radeon_bo_unpin(rdev->rlc.save_restore_obj); 3361 radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3362 3363 radeon_bo_unref(&rdev->rlc.save_restore_obj); 3364 rdev->rlc.save_restore_obj = NULL; 3365 } 3366 3367 /* clear state block */ 3368 if (rdev->rlc.clear_state_obj) { 3369 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 3370 if (unlikely(r != 0)) 3371 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); 3372 radeon_bo_unpin(rdev->rlc.clear_state_obj); 3373 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3374 3375 radeon_bo_unref(&rdev->rlc.clear_state_obj); 3376 rdev->rlc.clear_state_obj = NULL; 3377 } 3378 } 3379 3380 int si_rlc_init(struct radeon_device *rdev) 3381 { 3382 int r; 3383 3384 /* save restore block */ 3385 if (rdev->rlc.save_restore_obj == NULL) { 3386 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 3387 RADEON_GEM_DOMAIN_VRAM, NULL, 3388 &rdev->rlc.save_restore_obj); 3389 if (r) { 3390 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 3391 return r; 3392 } 3393 } 3394 3395 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 3396 if (unlikely(r != 0)) { 3397 si_rlc_fini(rdev); 3398 return r; 3399 } 3400 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 3401 &rdev->rlc.save_restore_gpu_addr); 3402 radeon_bo_unreserve(rdev->rlc.save_restore_obj); 
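	/* on pin failure, si_rlc_fini() below releases whatever RLC
	 * buffers have been allocated so far
	 */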
3403 if (r) { 3404 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 3405 si_rlc_fini(rdev); 3406 return r; 3407 } 3408 3409 /* clear state block */ 3410 if (rdev->rlc.clear_state_obj == NULL) { 3411 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 3412 RADEON_GEM_DOMAIN_VRAM, NULL, 3413 &rdev->rlc.clear_state_obj); 3414 if (r) { 3415 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 3416 si_rlc_fini(rdev); 3417 return r; 3418 } 3419 } 3420 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 3421 if (unlikely(r != 0)) { 3422 si_rlc_fini(rdev); 3423 return r; 3424 } 3425 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 3426 &rdev->rlc.clear_state_gpu_addr); 3427 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3428 if (r) { 3429 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 3430 si_rlc_fini(rdev); 3431 return r; 3432 } 3433 3434 return 0; 3435 } 3436 3437 static void si_rlc_stop(struct radeon_device *rdev) 3438 { 3439 WREG32(RLC_CNTL, 0); 3440 } 3441 3442 static void si_rlc_start(struct radeon_device *rdev) 3443 { 3444 WREG32(RLC_CNTL, RLC_ENABLE); 3445 } 3446 3447 static int si_rlc_resume(struct radeon_device *rdev) 3448 { 3449 u32 i; 3450 const __be32 *fw_data; 3451 3452 if (!rdev->rlc_fw) 3453 return -EINVAL; 3454 3455 si_rlc_stop(rdev); 3456 3457 WREG32(RLC_RL_BASE, 0); 3458 WREG32(RLC_RL_SIZE, 0); 3459 WREG32(RLC_LB_CNTL, 0); 3460 WREG32(RLC_LB_CNTR_MAX, 0xffffffff); 3461 WREG32(RLC_LB_CNTR_INIT, 0); 3462 3463 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 3464 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 3465 3466 WREG32(RLC_MC_CNTL, 0); 3467 WREG32(RLC_UCODE_CNTL, 0); 3468 3469 fw_data = (const __be32 *)rdev->rlc_fw->data; 3470 for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { 3471 WREG32(RLC_UCODE_ADDR, i); 3472 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3473 } 3474 WREG32(RLC_UCODE_ADDR, 0); 3475 3476 si_rlc_start(rdev); 3477 3478 return 0; 3479 } 3480 3481 static void si_enable_interrupts(struct radeon_device *rdev) 3482 { 3483 u32 ih_cntl = RREG32(IH_CNTL); 3484 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3485 3486 ih_cntl |= ENABLE_INTR; 3487 ih_rb_cntl |= IH_RB_ENABLE; 3488 WREG32(IH_CNTL, ih_cntl); 3489 WREG32(IH_RB_CNTL, ih_rb_cntl); 3490 rdev->ih.enabled = true; 3491 } 3492 3493 static void si_disable_interrupts(struct radeon_device *rdev) 3494 { 3495 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3496 u32 ih_cntl = RREG32(IH_CNTL); 3497 3498 ih_rb_cntl &= ~IH_RB_ENABLE; 3499 ih_cntl &= ~ENABLE_INTR; 3500 WREG32(IH_RB_CNTL, ih_rb_cntl); 3501 WREG32(IH_CNTL, ih_cntl); 3502 /* set rptr, wptr to 0 */ 3503 WREG32(IH_RB_RPTR, 0); 3504 WREG32(IH_RB_WPTR, 0); 3505 rdev->ih.enabled = false; 3506 rdev->ih.rptr = 0; 3507 } 3508 3509 static void si_disable_interrupt_state(struct radeon_device *rdev) 3510 { 3511 u32 tmp; 3512 3513 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3514 WREG32(CP_INT_CNTL_RING1, 0); 3515 WREG32(CP_INT_CNTL_RING2, 0); 3516 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 3517 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp); 3518 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 3519 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); 3520 WREG32(GRBM_INT_CNTL, 0); 3521 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 3522 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 3523 if (rdev->num_crtc >= 4) { 3524 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 3525 WREG32(INT_MASK + 
EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 3526 } 3527 if (rdev->num_crtc >= 6) { 3528 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 3529 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 3530 } 3531 3532 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 3533 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 3534 if (rdev->num_crtc >= 4) { 3535 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 3536 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 3537 } 3538 if (rdev->num_crtc >= 6) { 3539 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 3540 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 3541 } 3542 3543 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3544 3545 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3546 WREG32(DC_HPD1_INT_CONTROL, tmp); 3547 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3548 WREG32(DC_HPD2_INT_CONTROL, tmp); 3549 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3550 WREG32(DC_HPD3_INT_CONTROL, tmp); 3551 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3552 WREG32(DC_HPD4_INT_CONTROL, tmp); 3553 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3554 WREG32(DC_HPD5_INT_CONTROL, tmp); 3555 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3556 WREG32(DC_HPD6_INT_CONTROL, tmp); 3557 3558 } 3559 3560 static int si_irq_init(struct radeon_device *rdev) 3561 { 3562 int ret = 0; 3563 int rb_bufsz; 3564 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 3565 3566 /* allocate ring */ 3567 ret = r600_ih_ring_alloc(rdev); 3568 if (ret) 3569 return ret; 3570 3571 /* disable irqs */ 3572 si_disable_interrupts(rdev); 3573 3574 /* init rlc */ 3575 ret = si_rlc_resume(rdev); 3576 if (ret) { 3577 r600_ih_ring_fini(rdev); 3578 return ret; 3579 } 3580 3581 /* setup interrupt control */ 3582 /* set dummy read address to ring address */ 3583 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); 3584 interrupt_cntl = RREG32(INTERRUPT_CNTL); 3585 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi 3586 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN 3587 */ 3588 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; 3589 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ 3590 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; 3591 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3592 3593 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3594 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 3595 3596 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3597 IH_WPTR_OVERFLOW_CLEAR | 3598 (rb_bufsz << 1)); 3599 3600 if (rdev->wb.enabled) 3601 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; 3602 3603 /* set the writeback address whether it's enabled or not */ 3604 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); 3605 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); 3606 3607 WREG32(IH_RB_CNTL, ih_rb_cntl); 3608 3609 /* set rptr, wptr to 0 */ 3610 WREG32(IH_RB_RPTR, 0); 3611 WREG32(IH_RB_WPTR, 0); 3612 3613 /* Default settings for IH_CNTL (disabled at first) */ 3614 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0); 3615 /* RPTR_REARM only works if msi's are enabled */ 3616 if (rdev->msi_enabled) 3617 ih_cntl |= RPTR_REARM; 3618 WREG32(IH_CNTL, ih_cntl); 3619 3620 /* force the active interrupt state to all disabled */ 3621 si_disable_interrupt_state(rdev); 3622 3623 pci_enable_busmaster(rdev->dev); 3624 3625 /* enable irqs */ 3626 si_enable_interrupts(rdev); 3627 
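	/* the individual interrupt sources stay masked here; they are
	 * unmasked later through si_irq_set()
	 */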
int si_irq_set(struct radeon_device *rdev)
{
    u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
    u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
    u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
    u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
    u32 grbm_int_cntl = 0;
    u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
    u32 dma_cntl, dma_cntl1;

    if (!rdev->irq.installed) {
        DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
        return -EINVAL;
    }
    /* don't enable anything if the ih is disabled */
    if (!rdev->ih.enabled) {
        si_disable_interrupts(rdev);
        /* force the active interrupt state to all disabled */
        si_disable_interrupt_state(rdev);
        return 0;
    }

    hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

    dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
    dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;

    /* enable CP interrupts on all rings */
    if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
        DRM_DEBUG("si_irq_set: sw int gfx\n");
        cp_int_cntl |= TIME_STAMP_INT_ENABLE;
    }
    if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
        DRM_DEBUG("si_irq_set: sw int cp1\n");
        cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
    }
    if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
        DRM_DEBUG("si_irq_set: sw int cp2\n");
        cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
    }
    if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
        DRM_DEBUG("si_irq_set: sw int dma\n");
        dma_cntl |= TRAP_ENABLE;
    }

    if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
        DRM_DEBUG("si_irq_set: sw int dma1\n");
        dma_cntl1 |= TRAP_ENABLE;
    }
    if (rdev->irq.crtc_vblank_int[0] ||
        atomic_read(&rdev->irq.pflip[0])) {
        DRM_DEBUG("si_irq_set: vblank 0\n");
        crtc1 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[1] ||
        atomic_read(&rdev->irq.pflip[1])) {
        DRM_DEBUG("si_irq_set: vblank 1\n");
        crtc2 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[2] ||
        atomic_read(&rdev->irq.pflip[2])) {
        DRM_DEBUG("si_irq_set: vblank 2\n");
        crtc3 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[3] ||
        atomic_read(&rdev->irq.pflip[3])) {
        DRM_DEBUG("si_irq_set: vblank 3\n");
        crtc4 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[4] ||
        atomic_read(&rdev->irq.pflip[4])) {
        DRM_DEBUG("si_irq_set: vblank 4\n");
        crtc5 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[5] ||
        atomic_read(&rdev->irq.pflip[5])) {
        DRM_DEBUG("si_irq_set: vblank 5\n");
        crtc6 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.hpd[0]) {
        DRM_DEBUG("si_irq_set: hpd 1\n");
        hpd1 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[1]) {
        DRM_DEBUG("si_irq_set: hpd 2\n");
        hpd2 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[2]) {
        DRM_DEBUG("si_irq_set: hpd 3\n");
        hpd3 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[3]) {
        DRM_DEBUG("si_irq_set: hpd 4\n");
        hpd4 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[4]) {
        DRM_DEBUG("si_irq_set: hpd 5\n");
        hpd5 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[5]) {
        DRM_DEBUG("si_irq_set: hpd 6\n");
        hpd6 |= DC_HPDx_INT_EN;
    }

    WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
    WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
    WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);

    WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
    WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);

    WREG32(GRBM_INT_CNTL, grbm_int_cntl);

    WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
    WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
    if (rdev->num_crtc >= 4) {
        WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
        WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
    }
    if (rdev->num_crtc >= 6) {
        WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
        WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
    }

    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
    if (rdev->num_crtc >= 4) {
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
    }
    if (rdev->num_crtc >= 6) {
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
    }

    WREG32(DC_HPD1_INT_CONTROL, hpd1);
    WREG32(DC_HPD2_INT_CONTROL, hpd2);
    WREG32(DC_HPD3_INT_CONTROL, hpd3);
    WREG32(DC_HPD4_INT_CONTROL, hpd4);
    WREG32(DC_HPD5_INT_CONTROL, hpd5);
    WREG32(DC_HPD6_INT_CONTROL, hpd6);

    return 0;
}
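/*
 * Note (illustrative): si_irq_set() follows a read-mask-accumulate-write
 * pattern -- each control register is read back with its enable bit
 * cleared, the requested enables are OR'd into local copies, and the
 * hardware is written only once all of the values have been computed.
 */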
4\n"); 3729 hpd4 |= DC_HPDx_INT_EN; 3730 } 3731 if (rdev->irq.hpd[4]) { 3732 DRM_DEBUG("si_irq_set: hpd 5\n"); 3733 hpd5 |= DC_HPDx_INT_EN; 3734 } 3735 if (rdev->irq.hpd[5]) { 3736 DRM_DEBUG("si_irq_set: hpd 6\n"); 3737 hpd6 |= DC_HPDx_INT_EN; 3738 } 3739 3740 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 3741 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); 3742 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); 3743 3744 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl); 3745 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1); 3746 3747 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3748 3749 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 3750 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 3751 if (rdev->num_crtc >= 4) { 3752 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 3753 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); 3754 } 3755 if (rdev->num_crtc >= 6) { 3756 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); 3757 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 3758 } 3759 3760 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 3761 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 3762 if (rdev->num_crtc >= 4) { 3763 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 3764 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 3765 } 3766 if (rdev->num_crtc >= 6) { 3767 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 3768 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 3769 } 3770 3771 WREG32(DC_HPD1_INT_CONTROL, hpd1); 3772 WREG32(DC_HPD2_INT_CONTROL, hpd2); 3773 WREG32(DC_HPD3_INT_CONTROL, hpd3); 3774 WREG32(DC_HPD4_INT_CONTROL, hpd4); 3775 WREG32(DC_HPD5_INT_CONTROL, hpd5); 3776 WREG32(DC_HPD6_INT_CONTROL, hpd6); 3777 3778 return 0; 3779 } 3780 3781 static inline void si_irq_ack(struct radeon_device *rdev) 3782 { 3783 u32 tmp; 3784 3785 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); 3786 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3787 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); 3788 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); 3789 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); 3790 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 3791 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); 3792 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); 3793 if (rdev->num_crtc >= 4) { 3794 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); 3795 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); 3796 } 3797 if (rdev->num_crtc >= 6) { 3798 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); 3799 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); 3800 } 3801 3802 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) 3803 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 3804 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) 3805 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 3806 if (rdev->irq.stat_regs.evergreen.disp_int & 
static void si_irq_disable(struct radeon_device *rdev)
{
    si_disable_interrupts(rdev);
    /* Wait and acknowledge irq */
    DRM_MDELAY(1);
    si_irq_ack(rdev);
    si_disable_interrupt_state(rdev);
}

static void si_irq_suspend(struct radeon_device *rdev)
{
    si_irq_disable(rdev);
    si_rlc_stop(rdev);
}

static void si_irq_fini(struct radeon_device *rdev)
{
    si_irq_suspend(rdev);
    r600_ih_ring_fini(rdev);
}

static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
{
    u32 wptr, tmp;

    if (rdev->wb.enabled)
        wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
    else
        wptr = RREG32(IH_RB_WPTR);

    if (wptr & RB_OVERFLOW) {
        /* When a ring buffer overflow happens, start parsing interrupts
         * from the last vector that was not overwritten (wptr + 16).
         * Hopefully this allows us to catch up.
         */
        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
                 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
        rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
        tmp = RREG32(IH_RB_CNTL);
        tmp |= IH_WPTR_OVERFLOW_CLEAR;
        WREG32(IH_RB_CNTL, tmp);
    }
    return (wptr & rdev->ih.ptr_mask);
}
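/*
 * Example (illustrative): with the 64KB IH ring initialized below,
 * ptr_mask is ring_size - 1 == 0xffff.  An overflowed wptr of 0xfff8
 * restarts processing at rptr = (0xfff8 + 16) & 0xffff = 0x0008, i.e.
 * just past the oldest entry the hardware may have overwritten.
 */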
/* SI IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [71:64]  - RINGID
 * [79:72]  - VMID
 * [127:80] - reserved
 */
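/*
 * Illustrative sketch (not used by the driver): decoding one IV ring
 * entry into its fields per the layout above.  The struct and helper
 * are hypothetical; the masks follow from the bit ranges.
 */
#if 0
struct si_iv_entry {
    u32 src_id;   /* [7:0]   */
    u32 src_data; /* [59:32] */
    u32 ring_id;  /* [71:64] */
    u32 vm_id;    /* [79:72] */
};

static void si_decode_iv_entry(const u32 *ring, u32 rptr,
                               struct si_iv_entry *e)
{
    u32 ring_index = rptr / 4; /* rptr counts bytes, ring[] holds dwords */

    e->src_id   = le32_to_cpu(ring[ring_index + 0]) & 0xff;
    e->src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;
    e->ring_id  = le32_to_cpu(ring[ring_index + 2]) & 0xff;
    e->vm_id    = (le32_to_cpu(ring[ring_index + 2]) >> 8) & 0xff;
}
#endif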
irqreturn_t si_irq_process(struct radeon_device *rdev)
{
    u32 wptr;
    u32 rptr;
    u32 src_id, src_data, ring_id;
    u32 ring_index;
    bool queue_hotplug = false;

    if (!rdev->ih.enabled || rdev->shutdown)
        return IRQ_NONE;

    wptr = si_get_ih_wptr(rdev);

restart_ih:
    /* is somebody else already processing irqs? */
    if (atomic_xchg(&rdev->ih.lock, 1))
        return IRQ_NONE;

    rptr = rdev->ih.rptr;
    DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

    /* Order reading of wptr vs. reading of IH ring data */
    cpu_lfence();

    /* display interrupts */
    si_irq_ack(rdev);

    while (rptr != wptr) {
        /* wptr/rptr are in bytes! */
        ring_index = rptr / 4;
        src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
        src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
        ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;

        switch (src_id) {
        case 1: /* D1 vblank/vline */
            switch (src_data) {
            case 0: /* D1 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[0]) {
                        drm_handle_vblank(rdev->ddev, 0);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[0]))
                        radeon_crtc_handle_flip(rdev, 0);
                    rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D1 vblank\n");
                }
                break;
            case 1: /* D1 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D1 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 2: /* D2 vblank/vline */
            switch (src_data) {
            case 0: /* D2 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[1]) {
                        drm_handle_vblank(rdev->ddev, 1);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[1]))
                        radeon_crtc_handle_flip(rdev, 1);
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D2 vblank\n");
                }
                break;
            case 1: /* D2 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D2 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 3: /* D3 vblank/vline */
            switch (src_data) {
            case 0: /* D3 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[2]) {
                        drm_handle_vblank(rdev->ddev, 2);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[2]))
                        radeon_crtc_handle_flip(rdev, 2);
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D3 vblank\n");
                }
                break;
            case 1: /* D3 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D3 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 4: /* D4 vblank/vline */
            switch (src_data) {
            case 0: /* D4 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[3]) {
                        drm_handle_vblank(rdev->ddev, 3);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[3]))
                        radeon_crtc_handle_flip(rdev, 3);
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D4 vblank\n");
                }
                break;
            case 1: /* D4 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D4 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 5: /* D5 vblank/vline */
            switch (src_data) {
            case 0: /* D5 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[4]) {
                        drm_handle_vblank(rdev->ddev, 4);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[4]))
                        radeon_crtc_handle_flip(rdev, 4);
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D5 vblank\n");
                }
                break;
            case 1: /* D5 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D5 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 6: /* D6 vblank/vline */
            switch (src_data) {
            case 0: /* D6 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[5]) {
                        drm_handle_vblank(rdev->ddev, 5);
                        rdev->pm.vblank_sync = true;
                        DRM_WAKEUP(&rdev->irq.vblank_queue);
                    }
                    if (atomic_read(&rdev->irq.pflip[5]))
                        radeon_crtc_handle_flip(rdev, 5);
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D6 vblank\n");
                }
                break;
            case 1: /* D6 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D6 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 42: /* HPD hotplug */
            switch (src_data) {
            case 0:
                if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD1\n");
                }
                break;
            case 1:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD2\n");
                }
                break;
            case 2:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD3\n");
                }
                break;
            case 3:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD4\n");
                }
                break;
            case 4:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD5\n");
                }
                break;
            case 5:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD6\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 146:
        case 147:
            dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
            dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                    RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
            dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                    RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
            /* reset addr and status */
            WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
            break;
        case 176: /* RINGID0 CP_INT */
            radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
            break;
        case 177: /* RINGID1 CP_INT */
            radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
            break;
        case 178: /* RINGID2 CP_INT */
            radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
            break;
        case 181: /* CP EOP event */
            DRM_DEBUG("IH: CP EOP\n");
            switch (ring_id) {
            case 0:
                radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                break;
            case 1:
                radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
                break;
            case 2:
                radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
                break;
            }
            break;
        case 224: /* DMA trap event */
            DRM_DEBUG("IH: DMA trap\n");
            radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
            break;
        case 233: /* GUI IDLE */
            DRM_DEBUG("IH: GUI idle\n");
            break;
        case 244: /* DMA1 trap event */
            DRM_DEBUG("IH: DMA1 trap\n");
            radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
            break;
        default:
            DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
            break;
        }

        /* wptr/rptr are in bytes! */
        rptr += 16;
        rptr &= rdev->ih.ptr_mask;
    }
    if (queue_hotplug)
        taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
    rdev->ih.rptr = rptr;
    WREG32(IH_RB_RPTR, rdev->ih.rptr);
    atomic_set(&rdev->ih.lock, 0);

    /* make sure wptr hasn't changed while processing */
    wptr = si_get_ih_wptr(rdev);
    if (wptr != rptr)
        goto restart_ih;

    return IRQ_HANDLED;
}
& LB_D4_VLINE_INTERRUPT) { 4062 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 4063 DRM_DEBUG("IH: D4 vline\n"); 4064 } 4065 break; 4066 default: 4067 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4068 break; 4069 } 4070 break; 4071 case 5: /* D5 vblank/vline */ 4072 switch (src_data) { 4073 case 0: /* D5 vblank */ 4074 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 4075 if (rdev->irq.crtc_vblank_int[4]) { 4076 drm_handle_vblank(rdev->ddev, 4); 4077 rdev->pm.vblank_sync = true; 4078 DRM_WAKEUP(&rdev->irq.vblank_queue); 4079 } 4080 if (atomic_read(&rdev->irq.pflip[4])) 4081 radeon_crtc_handle_flip(rdev, 4); 4082 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 4083 DRM_DEBUG("IH: D5 vblank\n"); 4084 } 4085 break; 4086 case 1: /* D5 vline */ 4087 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 4088 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 4089 DRM_DEBUG("IH: D5 vline\n"); 4090 } 4091 break; 4092 default: 4093 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4094 break; 4095 } 4096 break; 4097 case 6: /* D6 vblank/vline */ 4098 switch (src_data) { 4099 case 0: /* D6 vblank */ 4100 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 4101 if (rdev->irq.crtc_vblank_int[5]) { 4102 drm_handle_vblank(rdev->ddev, 5); 4103 rdev->pm.vblank_sync = true; 4104 DRM_WAKEUP(&rdev->irq.vblank_queue); 4105 } 4106 if (atomic_read(&rdev->irq.pflip[5])) 4107 radeon_crtc_handle_flip(rdev, 5); 4108 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 4109 DRM_DEBUG("IH: D6 vblank\n"); 4110 } 4111 break; 4112 case 1: /* D6 vline */ 4113 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 4114 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 4115 DRM_DEBUG("IH: D6 vline\n"); 4116 } 4117 break; 4118 default: 4119 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4120 break; 4121 } 4122 break; 4123 case 42: /* HPD hotplug */ 4124 switch (src_data) { 4125 case 0: 4126 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 4127 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 4128 queue_hotplug = true; 4129 DRM_DEBUG("IH: HPD1\n"); 4130 } 4131 break; 4132 case 1: 4133 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 4134 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 4135 queue_hotplug = true; 4136 DRM_DEBUG("IH: HPD2\n"); 4137 } 4138 break; 4139 case 2: 4140 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 4141 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 4142 queue_hotplug = true; 4143 DRM_DEBUG("IH: HPD3\n"); 4144 } 4145 break; 4146 case 3: 4147 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 4148 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 4149 queue_hotplug = true; 4150 DRM_DEBUG("IH: HPD4\n"); 4151 } 4152 break; 4153 case 4: 4154 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 4155 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 4156 queue_hotplug = true; 4157 DRM_DEBUG("IH: HPD5\n"); 4158 } 4159 break; 4160 case 5: 4161 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 4162 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 4163 queue_hotplug = true; 4164 DRM_DEBUG("IH: HPD6\n"); 4165 } 4166 break; 4167 default: 
/*
 * startup/shutdown callbacks
 */
static int si_startup(struct radeon_device *rdev)
{
    struct radeon_ring *ring;
    int r;

    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
        !rdev->rlc_fw || !rdev->mc_fw) {
        r = si_init_microcode(rdev);
        if (r) {
            DRM_ERROR("Failed to load firmware!\n");
            return r;
        }
    }

    r = si_mc_load_microcode(rdev);
    if (r) {
        DRM_ERROR("Failed to load MC firmware!\n");
        return r;
    }

    r = r600_vram_scratch_init(rdev);
    if (r)
        return r;

    si_mc_program(rdev);
    r = si_pcie_gart_enable(rdev);
    if (r)
        return r;
    si_gpu_init(rdev);

#if 0
    r = evergreen_blit_init(rdev);
    if (r) {
        r600_blit_fini(rdev);
        rdev->asic->copy = NULL;
        dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
    }
#endif
    /* allocate rlc buffers */
    r = si_rlc_init(rdev);
    if (r) {
        DRM_ERROR("Failed to init rlc BOs!\n");
        return r;
    }

    /* allocate wb buffer */
    r = radeon_wb_init(rdev);
    if (r)
        return r;

    r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        return r;
    }

    r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        return r;
    }

    r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        return r;
    }

    r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
        return r;
    }

    r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
    if (r) {
        dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
        return r;
    }

    /* Enable IRQ */
    r = si_irq_init(rdev);
    if (r) {
        DRM_ERROR("radeon: IH init failed (%d).\n", r);
        radeon_irq_kms_fini(rdev);
        return r;
    }
    si_irq_set(rdev);

    ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                         CP_RB0_RPTR, CP_RB0_WPTR,
                         0, 0xfffff, RADEON_CP_PACKET2);
    if (r)
        return r;

    ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
                         CP_RB1_RPTR, CP_RB1_WPTR,
                         0, 0xfffff, RADEON_CP_PACKET2);
    if (r)
        return r;

    ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
                         CP_RB2_RPTR, CP_RB2_WPTR,
                         0, 0xfffff, RADEON_CP_PACKET2);
    if (r)
        return r;

    ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
                         DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
                         DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
                         2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
    if (r)
        return r;

    ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
                         DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
                         DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
                         2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
    if (r)
        return r;

    r = si_cp_load_microcode(rdev);
    if (r)
        return r;
    r = si_cp_resume(rdev);
    if (r)
        return r;

    r = cayman_dma_resume(rdev);
    if (r)
        return r;

    r = radeon_ib_pool_init(rdev);
    if (r) {
        dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
        return r;
    }

    r = radeon_vm_manager_init(rdev);
    if (r) {
        dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
        return r;
    }

    return 0;
}
int si_resume(struct radeon_device *rdev)
{
    int r;

    /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
     * posting will perform the tasks necessary to bring the GPU back
     * into a good shape.
     */
    /* post card */
    atom_asic_init(rdev->mode_info.atom_context);

    rdev->accel_working = true;
    r = si_startup(rdev);
    if (r) {
        DRM_ERROR("si startup failed on resume\n");
        rdev->accel_working = false;
        return r;
    }

    return r;
}

int si_suspend(struct radeon_device *rdev)
{
    radeon_vm_manager_fini(rdev);
    si_cp_enable(rdev, false);
    cayman_dma_stop(rdev);
    si_irq_suspend(rdev);
    radeon_wb_disable(rdev);
    si_pcie_gart_disable(rdev);
    return 0;
}
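/*
 * Note (illustrative): si_suspend() unwinds the pieces brought up by
 * si_startup() broadly in reverse order -- the VM manager goes first,
 * then the CP and DMA engines are halted, interrupts (and with them the
 * RLC) are suspended, and writeback and the GART are disabled last.
 */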
/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call
 * asic-specific functions.  This should also allow us to remove a bunch
 * of callback functions like vram_info.
 */
int si_init(struct radeon_device *rdev)
{
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    int r;

    /* Read BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    /* Must be an ATOMBIOS */
    if (!rdev->is_atom_bios) {
        dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
        return -EINVAL;
    }
    r = radeon_atombios_init(rdev);
    if (r)
        return r;

    /* Post card if necessary */
    if (!radeon_card_posted(rdev)) {
        if (!rdev->bios) {
            dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
            return -EINVAL;
        }
        DRM_INFO("GPU not posted. posting now...\n");
        atom_asic_init(rdev->mode_info.atom_context);
    }
    /* Initialize scratch registers */
    si_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);

    /* Fence driver */
    r = radeon_fence_driver_init(rdev);
    if (r)
        return r;

    /* initialize memory controller */
    r = si_mc_init(rdev);
    if (r)
        return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;

    r = radeon_irq_kms_init(rdev);
    if (r)
        return r;

    ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    ring->ring_obj = NULL;
    r600_ring_init(rdev, ring, 1024 * 1024);

    ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
    ring->ring_obj = NULL;
    r600_ring_init(rdev, ring, 1024 * 1024);

    ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
    ring->ring_obj = NULL;
    r600_ring_init(rdev, ring, 1024 * 1024);

    ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
    ring->ring_obj = NULL;
    r600_ring_init(rdev, ring, 64 * 1024);

    ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
    ring->ring_obj = NULL;
    r600_ring_init(rdev, ring, 64 * 1024);

    rdev->ih.ring_obj = NULL;
    r600_ih_ring_init(rdev, 64 * 1024);

    r = r600_pcie_gart_init(rdev);
    if (r)
        return r;

    rdev->accel_working = true;
    r = si_startup(rdev);
    if (r) {
        dev_err(rdev->dev, "disabling GPU acceleration\n");
        si_cp_fini(rdev);
        cayman_dma_fini(rdev);
        si_irq_fini(rdev);
        si_rlc_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_vm_manager_fini(rdev);
        radeon_irq_kms_fini(rdev);
        si_pcie_gart_fini(rdev);
        rdev->accel_working = false;
    }

    /* Don't start up if the MC ucode is missing.
     * The default clocks and voltages before the MC ucode
     * is loaded are not sufficient for advanced operations.
     */
    if (!rdev->mc_fw) {
        DRM_ERROR("radeon: MC ucode required for NI+.\n");
        return -EINVAL;
    }

    return 0;
}
void si_fini(struct radeon_device *rdev)
{
#if 0
    r600_blit_fini(rdev);
#endif
    si_cp_fini(rdev);
    cayman_dma_fini(rdev);
    si_irq_fini(rdev);
    si_rlc_fini(rdev);
    radeon_wb_fini(rdev);
    radeon_vm_manager_fini(rdev);
    radeon_ib_pool_fini(rdev);
    radeon_irq_kms_fini(rdev);
    si_pcie_gart_fini(rdev);
    r600_vram_scratch_fini(rdev);
    radeon_gem_fini(rdev);
    radeon_fence_driver_fini(rdev);
    radeon_bo_fini(rdev);
    radeon_atombios_fini(rdev);
    si_fini_microcode(rdev);
    drm_free(rdev->bios, M_DRM);
    rdev->bios = NULL;
}
/**
 * si_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (SI).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
{
    uint64_t clock;

    spin_lock(&rdev->gpu_clock_mutex);
    WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
    clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
            ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
    spin_unlock(&rdev->gpu_clock_mutex);
    return clock;
}
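/*
 * Illustrative usage sketch (not part of the driver): timing a stretch
 * of work in GPU clocks with two counter snapshots.  The helper name is
 * hypothetical; the capture write above latches both 32-bit halves so
 * each snapshot is internally consistent.
 */
#if 0
static uint64_t si_gpu_clocks_elapsed(struct radeon_device *rdev,
                                      void (*work)(struct radeon_device *))
{
    uint64_t start, end;

    start = si_get_gpu_clock_counter(rdev);
    work(rdev);
    end = si_get_gpu_clock_counter(rdev);

    return end - start;
}
#endif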