/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_cayman.h"

/*
 * Indirect registers accessor
 */
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	spin_lock(&rdev->smc_idx_lock);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	r = RREG32(TN_SMC_IND_DATA_0);
	spin_unlock(&rdev->smc_idx_lock);
	return r;
}

void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	spin_lock(&rdev->smc_idx_lock);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	WREG32(TN_SMC_IND_DATA_0, (v));
	spin_unlock(&rdev->smc_idx_lock);
}

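/* GFX register list saved and restored by the RLC across
 * clockgating/powergating transitions on TN/ARUBA
 */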
static const u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};

static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

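/* golden register settings for the two ARUBA variants, selected by
 * PCI device ID in ni_init_golden_registers()
 */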
static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};

static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};

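/**
 * ni_init_golden_registers - program the golden register settings
 *
 * @rdev: radeon_device pointer
 *
 * Program the tuned register values recommended for the detected ASIC
 * (and, on ARUBA, for the specific device variant) at init time.
 */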
static void ni_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CAYMAN:
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers,
						 (const u32)ARRAY_SIZE(cayman_golden_registers));
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers2,
						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
		break;
	case CHIP_ARUBA:
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9903) ||
		    (rdev->pdev->device == 0x9904) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990A) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990D) ||
		    (rdev->pdev->device == 0x990E) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9913) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9918)) {
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers,
							 (const u32)ARRAY_SIZE(dvst_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		} else {
			radeon_program_register_sequence(rdev,
							 scrapper_golden_registers,
							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		}
		break;
	default:
		break;
	}
}

#define BTC_IO_MC_REGS_SIZE 29

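/* MC sequencer IO debug register settings programmed into the hw
 * before the MC ucode is loaded; one table per BTC/cayman ASIC
 */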
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

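/**
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR5 MC ucode into the hw (BTC/cayman).
 * Returns 0 on success, error on failure.
 */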
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

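/**
 * ni_init_microcode - load ucode images from disk
 *
 * @rdev: radeon_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */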
int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->datasize != pfp_req_size) {
		printk(KERN_ERR
		       "ni_pfp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->datasize != me_req_size) {
		printk(KERN_ERR
		       "ni_me: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc",
		  rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->datasize != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->datasize, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->datasize != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->datasize, fw_name);
			err = -EINVAL;
		}
	}

	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->datasize != smc_req_size) {
			printk(KERN_ERR
			       "ni_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->datasize, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}

/**
 * ni_fini_microcode - drop the firmware image references
 *
 * @rdev: radeon_device pointer
 *
 * Drop the pfp, me, rlc, mc and smc firmware image references.
 * Called at driver shutdown.
 */
void ni_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->pfp_fw);
	rdev->pfp_fw = NULL;
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
	release_firmware(rdev->rlc_fw);
	rdev->rlc_fw = NULL;
	release_firmware(rdev->mc_fw);
	rdev->mc_fw = NULL;
	release_firmware(rdev->smc_fw);
	rdev->smc_fw = NULL;
}

/**
 * cayman_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cayman_get_allowed_info_register(struct radeon_device *rdev,
				     u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

int tn_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
	int actual_temp = (temp / 8) - 49;

	return actual_temp * 1000;
}

/*
 * Core functions
 */
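/**
 * cayman_gpu_init - set up the core GFX configuration
 *
 * @rdev: radeon_device pointer
 *
 * Look up the per-ASIC shader engine/backend configuration, program
 * the tiling and render backend setup, and apply the hw defaults for
 * the 3D engine.
 */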
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled RBs are just the ones not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
			disabled_rb_mask &= ~(1 << i);
	}

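	/* collect the per-SE SIMD disable bitmaps to count the active SIMDs */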
	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.cayman.active_simds = hweight32(~tmp);

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 2) {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		} else {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zeroed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

	/* set clockgating golden values on TN */
	if (rdev->family == CHIP_ARUBA) {
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

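/**
 * cayman_pcie_gart_enable - set up the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Program the TLB and L2 cache control registers, point VM context 0
 * at the GART page table, and enable the remaining VM contexts for
 * use by the VM manager.
 * Returns 0 on success, error on failure.
 */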
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the page tables
	 * used by the VMs are set up and assigned on the fly in the VM
	 * part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
		       rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
	       PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
	       VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
	       READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       READ_PROTECTION_FAULT_ENABLE_DEFAULT |
	       WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	WREG32(SRBM_GFX_CNTL, RINGID(ring));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
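/**
 * cayman_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Flush the GPU caches and emit an EVENT_WRITE_EOP packet that writes
 * the fence value to memory and triggers an interrupt.
 */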
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else {
		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
			rptr = RREG32(CP_RB0_RPTR);
		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
			rptr = RREG32(CP_RB1_RPTR);
		else
			rptr = RREG32(CP_RB2_RPTR);
	}

	return rptr;
}

u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 wptr;

	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
		wptr = RREG32(CP_RB0_WPTR);
	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
		wptr = RREG32(CP_RB1_WPTR);
	else
		wptr = RREG32(CP_RB2_WPTR);

	return wptr;
}

void cayman_gfx_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
		WREG32(CP_RB0_WPTR, ring->wptr);
		(void)RREG32(CP_RB0_WPTR);
	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
		WREG32(CP_RB1_WPTR, ring->wptr);
		(void)RREG32(CP_RB1_WPTR);
	} else {
		WREG32(CP_RB2_WPTR, ring->wptr);
		(void)RREG32(CP_RB2_WPTR);
	}
}

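/**
 * cayman_cp_load_microcode - load the CP ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Halt the CP and write the PFP and ME ucode images into their
 * respective ucode RAMs.
 * Returns 0 on success, -EINVAL if the ucode images are missing.
 */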
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

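/**
 * cayman_cp_start - initialize the gfx ring contents
 *
 * @rdev: radeon_device pointer
 *
 * Emit the ME initialization packet and the default clear state on
 * the gfx ring.
 * Returns 0 on success, error on failure.
 */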
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->wptr = 0;
		WREG32(cp_rb_rptr[i], 0);
		WREG32(cp_rb_wptr[i], ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

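/**
 * cayman_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return a mask of the blocks that
 * need a soft reset.
 */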
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

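/**
 * cayman_gpu_soft_reset - soft reset the blocks in @reset_mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset
 *
 * Halt the CP and DMA engines, stop the MC, and toggle the GRBM/SRBM
 * soft reset bits for the requested blocks.
 */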
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

int cayman_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		evergreen_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cayman_gpu_soft_reset(rdev, reset_mask);

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		evergreen_gpu_pci_config_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
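/*
 * The reset strategy above is escalation: try the targeted soft reset
 * first, re-sample the status registers, and fall back to the
 * heavyweight PCI config reset only if something still reports busy.
 * The BIOS scratch "engine hung" flag brackets the attempt so the
 * vbios knows a recovery was in progress.
 */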
/**
 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

static void cayman_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v2_2_resume() fail early and do nothing, so going
		 * through that code would be pointless; just disable UVD
		 * here instead.
		 */
		rdev->has_uvd = 0;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}

static void cayman_uvd_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = uvd_v2_2_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}

static void cayman_uvd_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
		return;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
		return;
	}
	r = uvd_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
		return;
	}
}
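/*
 * UVD bring-up is split across three helpers: cayman_uvd_init() runs
 * once at driver load and sizes the ring, cayman_uvd_start() runs
 * early in cayman_startup() before IRQ and ring setup, and
 * cayman_uvd_resume() runs after the CP is alive to initialize the
 * ring itself. Each stage fails softly: an error leaves UVD disabled
 * rather than failing the whole ASIC init.
 */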
static void cayman_vce_init(struct radeon_device *rdev)
{
	int r;

	/* Only set for CHIP_ARUBA */
	if (!rdev->has_vce)
		return;

	r = radeon_vce_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
		/*
		 * At this point rdev->vce.vcpu_bo is NULL, which makes
		 * cayman_vce_start() fail early and do nothing, so going
		 * through that code would be pointless; just disable VCE
		 * here instead.
		 */
		rdev->has_vce = 0;
		return;
	}
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
}

static void cayman_vce_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_vce)
		return;

	r = radeon_vce_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = vce_v1_0_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
}

static void cayman_vce_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
		return;

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
		return;
	}
	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
		return;
	}
	r = vce_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
		return;
	}
}
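/*
 * VCE mirrors the UVD three-stage split above, with two rings instead
 * of one. rdev->has_vce is only set for CHIP_ARUBA, so on
 * BARTS/TURKS/CAICOS/CAYMAN these helpers are no-ops.
 */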
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);

	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	cayman_uvd_start(rdev);
	cayman_vce_start(rdev);

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	cayman_uvd_resume(rdev);
	cayman_vce_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; unlike r500 hw, on rv770
	 * and newer hw posting performs the tasks needed to bring the
	 * GPU back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	ni_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}
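/*
 * Suspend tears down in roughly the reverse order of cayman_startup():
 * PM and audio first, then the VM manager, the CP and DMA engines,
 * UVD, interrupts, writeback and finally the GART. The rings are not
 * freed here; cayman_resume() re-initializes them via cayman_startup().
 */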
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init pretty much does
 * nothing more than call asic-specific functions. This should also
 * allow us to remove a bunch of callbacks like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	cayman_uvd_init(rdev);
	cayman_vce_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
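/*
 * Error handling in cayman_init() is two-tier: a cayman_startup()
 * failure only disables acceleration (modesetting keeps working with
 * accel_working = false), while missing MC ucode on discrete parts is
 * fatal, since the default pre-ucode clocks and voltages cannot
 * support normal operation.
 */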
void cayman_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	if (rdev->has_vce)
		radeon_vce_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	ni_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}
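/*
 * Sample output of the decoder below (hypothetical values): a fault
 * by memory client 16 in vmid 1 would print as
 *
 *	VM fault (0x04, vmid 1) at page 2048, read from CB (16)
 *
 * The client id selects the block name via the switch statement and
 * the RW bit selects "read" vs. "write".
 */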
/**
 * cayman_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (cayman/TN).
 */
void cayman_vm_decode_fault(struct radeon_device *rdev,
			    u32 status, u32 addr)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	switch (mc_id) {
	case 32:
	case 16:
	case 96:
	case 80:
	case 160:
	case 144:
	case 224:
	case 208:
		block = "CB";
		break;
	case 33:
	case 17:
	case 97:
	case 81:
	case 161:
	case 145:
	case 225:
	case 209:
		block = "CB_FMASK";
		break;
	case 34:
	case 18:
	case 98:
	case 82:
	case 162:
	case 146:
	case 226:
	case 210:
		block = "CB_CMASK";
		break;
	case 35:
	case 19:
	case 99:
	case 83:
	case 163:
	case 147:
	case 227:
	case 211:
		block = "CB_IMMED";
		break;
	case 36:
	case 20:
	case 100:
	case 84:
	case 164:
	case 148:
	case 228:
	case 212:
		block = "DB";
		break;
	case 37:
	case 21:
	case 101:
	case 85:
	case 165:
	case 149:
	case 229:
	case 213:
		block = "DB_HTILE";
		break;
	case 38:
	case 22:
	case 102:
	case 86:
	case 166:
	case 150:
	case 230:
	case 214:
		block = "SX";
		break;
	case 39:
	case 23:
	case 103:
	case 87:
	case 167:
	case 151:
	case 231:
	case 215:
		block = "DB_STEN";
		break;
	case 40:
	case 24:
	case 104:
	case 88:
	case 232:
	case 216:
	case 168:
	case 152:
		block = "TC_TFETCH";
		break;
	case 41:
	case 25:
	case 105:
	case 89:
	case 233:
	case 217:
	case 169:
	case 153:
		block = "TC_VFETCH";
		break;
	case 42:
	case 26:
	case 106:
	case 90:
	case 234:
	case 218:
	case 170:
	case 154:
		block = "VC";
		break;
	case 112:
		block = "CP";
		break;
	case 113:
	case 114:
		block = "SH";
		break;
	case 115:
		block = "VGT";
		break;
	case 178:
		block = "IH";
		break;
	case 51:
		block = "RLC";
		break;
	case 55:
		block = "DMA";
		break;
	case 56:
		block = "HDP";
		break;
	default:
		block = "unknown";
		break;
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}

/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0)));   /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}

int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	struct atom_clock_dividers dividers;
	int r, i;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN | ECLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
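/*
 * Note on tn_set_vce_clocks(): only ecclk is actually programmed
 * here; the divider is looked up for the requested engine clock and
 * written to CG_ECLK_CNTL once the clock reports stable, while evclk
 * is accepted for interface symmetry with other ASICs but unused.
 * Each wait loop gives the clock up to one second (100 * 10 ms) to
 * settle before giving up with -ETIMEDOUT.
 */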