/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}
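
/*
 * Worked example (illustrative only): with a 4096 byte ring, head == 0x100
 * and tail == 0xe00, the subtraction goes negative and the ring size is
 * added back before subtracting the reserved gap:
 *
 *	space = 0x100 - 0xe00	-> -0xd00
 *	space += 0x1000		->  0x300
 *	return 0x300 - I915_RING_FREE_SPACE
 *
 * so the reported space always excludes the small gap presumably kept so
 * that the tail never quite catches up with the head.
 */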

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring,
			PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}
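
/*
 * For reference, reading off the emission above: a gen8 PIPE_CONTROL is six
 * dwords, the command header, the flags dword, two dwords of post-sync
 * address (only the low dword carries scratch_addr here) and two dwords of
 * immediate data.
 */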

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
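
/*
 * Note on the >= gen4 case above: shifting the bus address right by 28 and
 * masking with 0xf0 places what were bits 35:32 of the DMA address into
 * bits 7:4 of the register value, presumably how HWS_PGA expects addresses
 * above 4GiB to be encoded alongside the page-aligned low bits.
 */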

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;

	ring->head = request->postfix;
	ring->last_retired_head = -1;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}
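
/*
 * Schematically, the workaround emission above writes:
 *
 *	MI_LOAD_REGISTER_IMM(w->count)
 *	{ register offset, value } x w->count
 *	MI_NOOP
 *
 * i.e. w->count * 2 + 2 dwords, matching the space reserved with
 * intel_ring_begin(), bracketed by full barrier flushes on either side.
 */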

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
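
/*
 * Typical usage (for illustration): a platform init function below records
 * a masked write such as
 *
 *	WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT);
 *
 * which stores the register, mask and _MASKED_BIT_ENABLE() value in
 * dev_priv->workarounds; the accumulated list is then replayed onto the
 * hardware by intel_ring_workarounds_emit() above.
 */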

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
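
/*
 * Worked example (illustrative): if slice 0 reports subslice_7eu[0] == BIT(2),
 * exactly one subslice in that slice has 7 EUs, so ss == 2, vals[0] == 1 and
 * the field programmed above becomes GEN9_IZ_HASHING(0, 1).
 */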

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev_priv))
		return bdw_init_workarounds(engine);

	if (IS_CHERRYVIEW(dev_priv))
		return chv_init_workarounds(engine);

	if (IS_SKYLAKE(dev_priv))
		return skl_init_workarounds(engine);

	if (IS_BROXTON(dev_priv))
		return bxt_init_workarounds(engine);

	if (IS_KABYLAKE(dev_priv))
		return kbl_init_workarounds(engine);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	i915_vma_unpin_and_release(&dev_priv->semaphore);
}

static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*out++ = GFX_OP_PIPE_CONTROL(6);
		*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
			  PIPE_CONTROL_QW_WRITE |
			  PIPE_CONTROL_CS_STALL);
		*out++ = lower_32_bits(gtt_offset);
		*out++ = upper_32_bits(gtt_offset);
		*out++ = req->global_seqno;
		*out++ = 0;
		*out++ = (MI_SEMAPHORE_SIGNAL |
			  MI_SEMAPHORE_TARGET(waiter->hw_id));
		*out++ = 0;
	}

	return out;
}

static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
		*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
		*out++ = upper_32_bits(gtt_offset);
		*out++ = req->global_seqno;
		*out++ = (MI_SEMAPHORE_SIGNAL |
			  MI_SEMAPHORE_TARGET(waiter->hw_id));
		*out++ = 0;
	}

	return out;
}

static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int num_rings = 0;

	for_each_engine(engine, dev_priv, id) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;

		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
		if (i915_mmio_reg_valid(mbox_reg)) {
			*out++ = MI_LOAD_REGISTER_IMM(1);
			*out++ = i915_mmio_reg_offset(mbox_reg);
			*out++ = req->global_seqno;
			num_rings++;
		}
	}
	if (num_rings & 1)
		*out++ = MI_NOOP;

	return out;
}

static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_gem_request_submit(request);

	I915_WRITE_TAIL(request->engine, request->tail);
}

static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
				 u32 *out)
{
	*out++ = MI_STORE_DWORD_INDEX;
	*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*out++ = req->global_seqno;
	*out++ = MI_USER_INTERRUPT;

	req->tail = intel_ring_offset(req->ring, out);
}

static const int i9xx_emit_breadcrumb_sz = 4;
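
/*
 * Note: each *_emit_breadcrumb_sz constant is expected to match the number
 * of dwords its emit_breadcrumb() callback writes; four above
 * (MI_STORE_DWORD_INDEX, the HWS index, the seqno and MI_USER_INTERRUPT).
 */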

/**
 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
				      u32 *out)
{
	return i9xx_emit_breadcrumb(req,
				    req->engine->semaphore.signal(req, out));
}

static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
					u32 *out)
{
	struct intel_engine_cs *engine = req->engine;

	if (engine->semaphore.signal)
		out = engine->semaphore.signal(req, out);

	*out++ = GFX_OP_PIPE_CONTROL(6);
	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
		  PIPE_CONTROL_CS_STALL |
		  PIPE_CONTROL_QW_WRITE);
	*out++ = intel_hws_seqno_address(engine);
	*out++ = 0;
	*out++ = req->global_seqno;
	/* We're thrashing one dword of HWS. */
	*out++ = 0;
	*out++ = MI_USER_INTERRUPT;
	*out++ = MI_NOOP;

	req->tail = intel_ring_offset(req->ring, out);
}

static const int gen8_render_emit_breadcrumb_sz = 8;

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(ring, signal->global_seqno);
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_advance(ring);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
	return 0;
}

static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
	int ret;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, dw1 | wait_mbox);
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	intel_ring_emit(ring, signal->global_seqno - 1);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring,
				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, 4096);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, 4096);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

int intel_ring_pin(struct intel_ring *ring)
{
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
	enum i915_map_type map;
	struct i915_vma *vma = ring->vma;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr))
		goto err;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}
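
/*
 * Note on the mapping above: the ring is mapped write-back on LLC platforms
 * and write-combined elsewhere; objects in stolen memory are pinned mappable
 * and reached through the GTT aperture instead, since stolen memory
 * presumably cannot be mapped through an ordinary CPU page mapping.
 */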

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	i915_vma_unpin(ring->vma);
}

static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&dev_priv->drm, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	ring->engine = engine;

	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}
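
/*
 * Example (illustrative): intel_init_ring_buffer() below requests a
 * 32 * PAGE_SIZE ring, so on i830/845g the effective size ends up as
 * 32 * 4096 - 2 * CACHELINE_BYTES, keeping TAIL clear of the last two
 * cachelines per the erratum noted above.
 */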
1905 */ 1906 ring->effective_size = size; 1907 if (IS_I830(engine->i915) || IS_845G(engine->i915)) 1908 ring->effective_size -= 2 * CACHELINE_BYTES; 1909 1910 ring->last_retired_head = -1; 1911 intel_ring_update_space(ring); 1912 1913 vma = intel_ring_create_vma(engine->i915, size); 1914 if (IS_ERR(vma)) { 1915 kfree(ring); 1916 return ERR_CAST(vma); 1917 } 1918 ring->vma = vma; 1919 1920 return ring; 1921 } 1922 1923 void 1924 intel_ring_free(struct intel_ring *ring) 1925 { 1926 struct drm_i915_gem_object *obj = ring->vma->obj; 1927 1928 i915_vma_close(ring->vma); 1929 __i915_gem_object_release_unless_active(obj); 1930 1931 kfree(ring); 1932 } 1933 1934 static int intel_ring_context_pin(struct i915_gem_context *ctx, 1935 struct intel_engine_cs *engine) 1936 { 1937 struct intel_context *ce = &ctx->engine[engine->id]; 1938 int ret; 1939 1940 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1941 1942 if (ce->pin_count++) 1943 return 0; 1944 1945 if (ce->state) { 1946 struct i915_vma *vma; 1947 1948 vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH); 1949 if (IS_ERR(vma)) { 1950 ret = PTR_ERR(vma); 1951 goto error; 1952 } 1953 } 1954 1955 /* The kernel context is only used as a placeholder for flushing the 1956 * active context. It is never used for submitting user rendering and 1957 * as such never requires the golden render context, and so we can skip 1958 * emitting it when we switch to the kernel context. This is required 1959 * as during eviction we cannot allocate and pin the renderstate in 1960 * order to initialise the context. 1961 */ 1962 if (ctx == ctx->i915->kernel_context) 1963 ce->initialised = true; 1964 1965 i915_gem_context_get(ctx); 1966 return 0; 1967 1968 error: 1969 ce->pin_count = 0; 1970 return ret; 1971 } 1972 1973 static void intel_ring_context_unpin(struct i915_gem_context *ctx, 1974 struct intel_engine_cs *engine) 1975 { 1976 struct intel_context *ce = &ctx->engine[engine->id]; 1977 1978 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1979 1980 if (--ce->pin_count) 1981 return; 1982 1983 if (ce->state) 1984 i915_vma_unpin(ce->state); 1985 1986 i915_gem_context_put(ctx); 1987 } 1988 1989 static int intel_init_ring_buffer(struct intel_engine_cs *engine) 1990 { 1991 struct drm_i915_private *dev_priv = engine->i915; 1992 struct intel_ring *ring; 1993 int ret; 1994 1995 WARN_ON(engine->buffer); 1996 1997 intel_engine_setup_common(engine); 1998 1999 ret = intel_engine_init_common(engine); 2000 if (ret) 2001 goto error; 2002 2003 /* We may need to do things with the shrinker which 2004 * require us to immediately switch back to the default 2005 * context. This can cause a problem as pinning the 2006 * default context also requires GTT space which may not 2007 * be available. To avoid this we always pin the default 2008 * context. 
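 * The pin taken here is dropped again in intel_engine_cleanup().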
2009 */ 2010 ret = intel_ring_context_pin(dev_priv->kernel_context, engine); 2011 if (ret) 2012 goto error; 2013 2014 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); 2015 if (IS_ERR(ring)) { 2016 ret = PTR_ERR(ring); 2017 goto error; 2018 } 2019 2020 if (HWS_NEEDS_PHYSICAL(dev_priv)) { 2021 WARN_ON(engine->id != RCS); 2022 ret = init_phys_status_page(engine); 2023 if (ret) 2024 goto error; 2025 } else { 2026 ret = init_status_page(engine); 2027 if (ret) 2028 goto error; 2029 } 2030 2031 ret = intel_ring_pin(ring); 2032 if (ret) { 2033 intel_ring_free(ring); 2034 goto error; 2035 } 2036 engine->buffer = ring; 2037 2038 return 0; 2039 2040 error: 2041 intel_engine_cleanup(engine); 2042 return ret; 2043 } 2044 2045 void intel_engine_cleanup(struct intel_engine_cs *engine) 2046 { 2047 struct drm_i915_private *dev_priv; 2048 2049 dev_priv = engine->i915; 2050 2051 if (engine->buffer) { 2052 WARN_ON(INTEL_GEN(dev_priv) > 2 && 2053 (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2054 2055 intel_ring_unpin(engine->buffer); 2056 intel_ring_free(engine->buffer); 2057 engine->buffer = NULL; 2058 } 2059 2060 if (engine->cleanup) 2061 engine->cleanup(engine); 2062 2063 if (HWS_NEEDS_PHYSICAL(dev_priv)) { 2064 WARN_ON(engine->id != RCS); 2065 cleanup_phys_status_page(engine); 2066 } else { 2067 cleanup_status_page(engine); 2068 } 2069 2070 intel_engine_cleanup_common(engine); 2071 2072 intel_ring_context_unpin(dev_priv->kernel_context, engine); 2073 2074 engine->i915 = NULL; 2075 dev_priv->engine[engine->id] = NULL; 2076 kfree(engine); 2077 } 2078 2079 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) 2080 { 2081 struct intel_engine_cs *engine; 2082 enum intel_engine_id id; 2083 2084 for_each_engine(engine, dev_priv, id) { 2085 engine->buffer->head = engine->buffer->tail; 2086 engine->buffer->last_retired_head = -1; 2087 } 2088 } 2089 2090 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2091 { 2092 int ret; 2093 2094 /* Flush enough space to reduce the likelihood of waiting after 2095 * we start building the request - in which case we will just 2096 * have to repeat work. 2097 */ 2098 request->reserved_space += LEGACY_REQUEST_SIZE; 2099 2100 request->ring = request->engine->buffer; 2101 2102 ret = intel_ring_begin(request, 0); 2103 if (ret) 2104 return ret; 2105 2106 request->reserved_space -= LEGACY_REQUEST_SIZE; 2107 return 0; 2108 } 2109 2110 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2111 { 2112 struct intel_ring *ring = req->ring; 2113 struct drm_i915_gem_request *target; 2114 long timeout; 2115 2116 lockdep_assert_held(&req->i915->drm.struct_mutex); 2117 2118 intel_ring_update_space(ring); 2119 if (ring->space >= bytes) 2120 return 0; 2121 2122 /* 2123 * Space is reserved in the ringbuffer for finalising the request, 2124 * as that cannot be allowed to fail. During request finalisation, 2125 * reserved_space is set to 0 to stop the overallocation and the 2126 * assumption is that then we never need to wait (which has the 2127 * risk of failing with EINTR). 2128 * 2129 * See also i915_gem_request_alloc() and i915_add_request(). 2130 */ 2131 GEM_BUG_ON(!req->reserved_space); 2132 2133 list_for_each_entry(target, &ring->request_list, ring_link) { 2134 unsigned space; 2135 2136 /* Would completion of this request free enough space? 
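	 *
	 * Worked example: in a 4096-byte ring with tail == 3968, retiring
	 * a request whose postfix sits at offset 512 would leave
	 * (512 - 3968 + 4096) - I915_RING_FREE_SPACE =
	 * 640 - I915_RING_FREE_SPACE bytes of usable space.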
*/ 2137 space = __intel_ring_space(target->postfix, ring->tail, 2138 ring->size); 2139 if (space >= bytes) 2140 break; 2141 } 2142 2143 if (WARN_ON(&target->ring_link == &ring->request_list)) 2144 return -ENOSPC; 2145 2146 timeout = i915_wait_request(target, 2147 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 2148 MAX_SCHEDULE_TIMEOUT); 2149 if (timeout < 0) 2150 return timeout; 2151 2152 i915_gem_request_retire_upto(target); 2153 2154 intel_ring_update_space(ring); 2155 GEM_BUG_ON(ring->space < bytes); 2156 return 0; 2157 } 2158 2159 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2160 { 2161 struct intel_ring *ring = req->ring; 2162 int remain_actual = ring->size - ring->tail; 2163 int remain_usable = ring->effective_size - ring->tail; 2164 int bytes = num_dwords * sizeof(u32); 2165 int total_bytes, wait_bytes; 2166 bool need_wrap = false; 2167 2168 total_bytes = bytes + req->reserved_space; 2169 2170 if (unlikely(bytes > remain_usable)) { 2171 /* 2172 * Not enough space for the basic request. So need to flush 2173 * out the remainder and then wait for base + reserved. 2174 */ 2175 wait_bytes = remain_actual + total_bytes; 2176 need_wrap = true; 2177 } else if (unlikely(total_bytes > remain_usable)) { 2178 /* 2179 * The base request will fit but the reserved space 2180 * falls off the end. So we don't need an immediate wrap 2181 * and only need to effectively wait for the reserved 2182 * size space from the start of ringbuffer. 2183 */ 2184 wait_bytes = remain_actual + req->reserved_space; 2185 } else { 2186 /* No wrapping required, just waiting. */ 2187 wait_bytes = total_bytes; 2188 } 2189 2190 if (wait_bytes > ring->space) { 2191 int ret = wait_for_space(req, wait_bytes); 2192 if (unlikely(ret)) 2193 return ret; 2194 } 2195 2196 if (unlikely(need_wrap)) { 2197 GEM_BUG_ON(remain_actual > ring->space); 2198 GEM_BUG_ON(ring->tail + remain_actual > ring->size); 2199 2200 /* Fill the tail with MI_NOOP */ 2201 memset(ring->vaddr + ring->tail, 0, remain_actual); 2202 ring->tail = 0; 2203 ring->space -= remain_actual; 2204 } 2205 2206 ring->space -= bytes; 2207 GEM_BUG_ON(ring->space < 0); 2208 return 0; 2209 } 2210 2211 /* Align the ring tail to a cacheline boundary */ 2212 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2213 { 2214 struct intel_ring *ring = req->ring; 2215 int num_dwords = 2216 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2217 int ret; 2218 2219 if (num_dwords == 0) 2220 return 0; 2221 2222 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2223 ret = intel_ring_begin(req, num_dwords); 2224 if (ret) 2225 return ret; 2226 2227 while (num_dwords--) 2228 intel_ring_emit(ring, MI_NOOP); 2229 2230 intel_ring_advance(ring); 2231 2232 return 0; 2233 } 2234 2235 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) 2236 { 2237 struct drm_i915_private *dev_priv = request->i915; 2238 2239 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2240 2241 /* Every tail move must follow the sequence below */ 2242 2243 /* Disable notification that the ring is IDLE. The GT 2244 * will then assume that it is busy and bring it out of rc6. 2245 */ 2246 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2247 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2248 2249 /* Clear the context id. Here be magic! */ 2250 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 2251 2252 /* Wait for the ring not to be idle, i.e. for it to wake up. 
*/ 2253 if (intel_wait_for_register_fw(dev_priv, 2254 GEN6_BSD_SLEEP_PSMI_CONTROL, 2255 GEN6_BSD_SLEEP_INDICATOR, 2256 0, 2257 50)) 2258 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2259 2260 /* Now that the ring is fully powered up, update the tail */ 2261 i9xx_submit_request(request); 2262 2263 /* Let the ring send IDLE messages to the GT again, 2264 * and so let it sleep to conserve power when idle. 2265 */ 2266 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2267 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2268 2269 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2270 } 2271 2272 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 2273 { 2274 struct intel_ring *ring = req->ring; 2275 uint32_t cmd; 2276 int ret; 2277 2278 ret = intel_ring_begin(req, 4); 2279 if (ret) 2280 return ret; 2281 2282 cmd = MI_FLUSH_DW; 2283 if (INTEL_GEN(req->i915) >= 8) 2284 cmd += 1; 2285 2286 /* We always require a command barrier so that subsequent 2287 * commands, such as breadcrumb interrupts, are strictly ordered 2288 * wrt the contents of the write cache being flushed to memory 2289 * (and thus being coherent from the CPU). 2290 */ 2291 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2292 2293 /* 2294 * Bspec vol 1c.5 - video engine command streamer: 2295 * "If ENABLED, all TLBs will be invalidated once the flush 2296 * operation is complete. This bit is only valid when the 2297 * Post-Sync Operation field is a value of 1h or 3h." 2298 */ 2299 if (mode & EMIT_INVALIDATE) 2300 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2301 2302 intel_ring_emit(ring, cmd); 2303 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2304 if (INTEL_GEN(req->i915) >= 8) { 2305 intel_ring_emit(ring, 0); /* upper addr */ 2306 intel_ring_emit(ring, 0); /* value */ 2307 } else { 2308 intel_ring_emit(ring, 0); 2309 intel_ring_emit(ring, MI_NOOP); 2310 } 2311 intel_ring_advance(ring); 2312 return 0; 2313 } 2314 2315 static int 2316 gen8_emit_bb_start(struct drm_i915_gem_request *req, 2317 u64 offset, u32 len, 2318 unsigned int dispatch_flags) 2319 { 2320 struct intel_ring *ring = req->ring; 2321 bool ppgtt = USES_PPGTT(req->i915) && 2322 !(dispatch_flags & I915_DISPATCH_SECURE); 2323 int ret; 2324 2325 ret = intel_ring_begin(req, 4); 2326 if (ret) 2327 return ret; 2328 2329 /* FIXME(BDW): Address space and security selectors. */ 2330 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2331 (dispatch_flags & I915_DISPATCH_RS ? 2332 MI_BATCH_RESOURCE_STREAMER : 0)); 2333 intel_ring_emit(ring, lower_32_bits(offset)); 2334 intel_ring_emit(ring, upper_32_bits(offset)); 2335 intel_ring_emit(ring, MI_NOOP); 2336 intel_ring_advance(ring); 2337 2338 return 0; 2339 } 2340 2341 static int 2342 hsw_emit_bb_start(struct drm_i915_gem_request *req, 2343 u64 offset, u32 len, 2344 unsigned int dispatch_flags) 2345 { 2346 struct intel_ring *ring = req->ring; 2347 int ret; 2348 2349 ret = intel_ring_begin(req, 2); 2350 if (ret) 2351 return ret; 2352 2353 intel_ring_emit(ring, 2354 MI_BATCH_BUFFER_START | 2355 (dispatch_flags & I915_DISPATCH_SECURE ? 2356 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2357 (dispatch_flags & I915_DISPATCH_RS ? 
2358 MI_BATCH_RESOURCE_STREAMER : 0)); 2359 /* bit0-7 is the length on GEN6+ */ 2360 intel_ring_emit(ring, offset); 2361 intel_ring_advance(ring); 2362 2363 return 0; 2364 } 2365 2366 static int 2367 gen6_emit_bb_start(struct drm_i915_gem_request *req, 2368 u64 offset, u32 len, 2369 unsigned int dispatch_flags) 2370 { 2371 struct intel_ring *ring = req->ring; 2372 int ret; 2373 2374 ret = intel_ring_begin(req, 2); 2375 if (ret) 2376 return ret; 2377 2378 intel_ring_emit(ring, 2379 MI_BATCH_BUFFER_START | 2380 (dispatch_flags & I915_DISPATCH_SECURE ? 2381 0 : MI_BATCH_NON_SECURE_I965)); 2382 /* bit0-7 is the length on GEN6+ */ 2383 intel_ring_emit(ring, offset); 2384 intel_ring_advance(ring); 2385 2386 return 0; 2387 } 2388 2389 /* Blitter support (SandyBridge+) */ 2390 2391 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) 2392 { 2393 struct intel_ring *ring = req->ring; 2394 uint32_t cmd; 2395 int ret; 2396 2397 ret = intel_ring_begin(req, 4); 2398 if (ret) 2399 return ret; 2400 2401 cmd = MI_FLUSH_DW; 2402 if (INTEL_GEN(req->i915) >= 8) 2403 cmd += 1; 2404 2405 /* We always require a command barrier so that subsequent 2406 * commands, such as breadcrumb interrupts, are strictly ordered 2407 * wrt the contents of the write cache being flushed to memory 2408 * (and thus being coherent from the CPU). 2409 */ 2410 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2411 2412 /* 2413 * Bspec vol 1c.3 - blitter engine command streamer: 2414 * "If ENABLED, all TLBs will be invalidated once the flush 2415 * operation is complete. This bit is only valid when the 2416 * Post-Sync Operation field is a value of 1h or 3h." 2417 */ 2418 if (mode & EMIT_INVALIDATE) 2419 cmd |= MI_INVALIDATE_TLB; 2420 intel_ring_emit(ring, cmd); 2421 intel_ring_emit(ring, 2422 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2423 if (INTEL_GEN(req->i915) >= 8) { 2424 intel_ring_emit(ring, 0); /* upper addr */ 2425 intel_ring_emit(ring, 0); /* value */ 2426 } else { 2427 intel_ring_emit(ring, 0); 2428 intel_ring_emit(ring, MI_NOOP); 2429 } 2430 intel_ring_advance(ring); 2431 2432 return 0; 2433 } 2434 2435 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, 2436 struct intel_engine_cs *engine) 2437 { 2438 struct drm_i915_gem_object *obj; 2439 int ret, i; 2440 2441 if (!i915.semaphores) 2442 return; 2443 2444 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { 2445 struct i915_vma *vma; 2446 2447 obj = i915_gem_object_create(&dev_priv->drm, 4096); 2448 if (IS_ERR(obj)) 2449 goto err; 2450 2451 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); 2452 if (IS_ERR(vma)) 2453 goto err_obj; 2454 2455 ret = i915_gem_object_set_to_gtt_domain(obj, false); 2456 if (ret) 2457 goto err_obj; 2458 2459 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 2460 if (ret) 2461 goto err_obj; 2462 2463 dev_priv->semaphore = vma; 2464 } 2465 2466 if (INTEL_GEN(dev_priv) >= 8) { 2467 u32 offset = i915_ggtt_offset(dev_priv->semaphore); 2468 2469 engine->semaphore.sync_to = gen8_ring_sync_to; 2470 engine->semaphore.signal = gen8_xcs_signal; 2471 2472 for (i = 0; i < I915_NUM_ENGINES; i++) { 2473 u32 ring_offset; 2474 2475 if (i != engine->id) 2476 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); 2477 else 2478 ring_offset = MI_SEMAPHORE_SYNC_INVALID; 2479 2480 engine->semaphore.signal_ggtt[i] = ring_offset; 2481 } 2482 } else if (INTEL_GEN(dev_priv) >= 6) { 2483 engine->semaphore.sync_to = gen6_ring_sync_to; 2484 engine->semaphore.signal = gen6_signal; 2485 2486 /* 2487 * The 
current semaphore is only applied on pre-gen8 2488 * platform. And there is no VCS2 ring on the pre-gen8 2489 * platform. So the semaphore between RCS and VCS2 is 2490 * initialized as INVALID. Gen8 will initialize the 2491 * sema between VCS2 and RCS later. 2492 */ 2493 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { 2494 static const struct { 2495 u32 wait_mbox; 2496 i915_reg_t mbox_reg; 2497 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { 2498 [RCS_HW] = { 2499 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, 2500 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, 2501 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, 2502 }, 2503 [VCS_HW] = { 2504 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, 2505 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, 2506 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, 2507 }, 2508 [BCS_HW] = { 2509 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, 2510 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, 2511 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, 2512 }, 2513 [VECS_HW] = { 2514 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, 2515 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, 2516 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, 2517 }, 2518 }; 2519 u32 wait_mbox; 2520 i915_reg_t mbox_reg; 2521 2522 if (i == engine->hw_id) { 2523 wait_mbox = MI_SEMAPHORE_SYNC_INVALID; 2524 mbox_reg = GEN6_NOSYNC; 2525 } else { 2526 wait_mbox = sem_data[engine->hw_id][i].wait_mbox; 2527 mbox_reg = sem_data[engine->hw_id][i].mbox_reg; 2528 } 2529 2530 engine->semaphore.mbox.wait[i] = wait_mbox; 2531 engine->semaphore.mbox.signal[i] = mbox_reg; 2532 } 2533 } 2534 2535 return; 2536 2537 err_obj: 2538 i915_gem_object_put(obj); 2539 err: 2540 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); 2541 i915.semaphores = 0; 2542 } 2543 2544 static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2545 struct intel_engine_cs *engine) 2546 { 2547 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift; 2548 2549 if (INTEL_GEN(dev_priv) >= 8) { 2550 engine->irq_enable = gen8_irq_enable; 2551 engine->irq_disable = gen8_irq_disable; 2552 engine->irq_seqno_barrier = gen6_seqno_barrier; 2553 } else if (INTEL_GEN(dev_priv) >= 6) { 2554 engine->irq_enable = gen6_irq_enable; 2555 engine->irq_disable = gen6_irq_disable; 2556 engine->irq_seqno_barrier = gen6_seqno_barrier; 2557 } else if (INTEL_GEN(dev_priv) >= 5) { 2558 engine->irq_enable = gen5_irq_enable; 2559 engine->irq_disable = gen5_irq_disable; 2560 engine->irq_seqno_barrier = gen5_seqno_barrier; 2561 } else if (INTEL_GEN(dev_priv) >= 3) { 2562 engine->irq_enable = i9xx_irq_enable; 2563 engine->irq_disable = i9xx_irq_disable; 2564 } else { 2565 engine->irq_enable = i8xx_irq_enable; 2566 engine->irq_disable = i8xx_irq_disable; 2567 } 2568 } 2569 2570 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2571 struct intel_engine_cs *engine) 2572 { 2573 intel_ring_init_irq(dev_priv, engine); 2574 intel_ring_init_semaphores(dev_priv, engine); 2575 2576 engine->init_hw = init_ring_common; 2577 engine->reset_hw = reset_ring_common; 2578 2579 engine->emit_breadcrumb = i9xx_emit_breadcrumb; 2580 engine->emit_breadcrumb_sz = 
i9xx_emit_breadcrumb_sz; 2581 if (i915.semaphores) { 2582 int num_rings; 2583 2584 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; 2585 2586 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; 2587 if (INTEL_GEN(dev_priv) >= 8) { 2588 engine->emit_breadcrumb_sz += num_rings * 6; 2589 } else { 2590 engine->emit_breadcrumb_sz += num_rings * 3; 2591 if (num_rings & 1) 2592 engine->emit_breadcrumb_sz++; 2593 } 2594 } 2595 engine->submit_request = i9xx_submit_request; 2596 2597 if (INTEL_GEN(dev_priv) >= 8) 2598 engine->emit_bb_start = gen8_emit_bb_start; 2599 else if (INTEL_GEN(dev_priv) >= 6) 2600 engine->emit_bb_start = gen6_emit_bb_start; 2601 else if (INTEL_GEN(dev_priv) >= 4) 2602 engine->emit_bb_start = i965_emit_bb_start; 2603 else if (IS_I830(dev_priv) || IS_845G(dev_priv)) 2604 engine->emit_bb_start = i830_emit_bb_start; 2605 else 2606 engine->emit_bb_start = i915_emit_bb_start; 2607 } 2608 2609 int intel_init_render_ring_buffer(struct intel_engine_cs *engine) 2610 { 2611 struct drm_i915_private *dev_priv = engine->i915; 2612 int ret; 2613 2614 intel_ring_default_vfuncs(dev_priv, engine); 2615 2616 if (HAS_L3_DPF(dev_priv)) 2617 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2618 2619 if (INTEL_GEN(dev_priv) >= 8) { 2620 engine->init_context = intel_rcs_ctx_init; 2621 engine->emit_breadcrumb = gen8_render_emit_breadcrumb; 2622 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; 2623 engine->emit_flush = gen8_render_ring_flush; 2624 if (i915.semaphores) { 2625 int num_rings; 2626 2627 engine->semaphore.signal = gen8_rcs_signal; 2628 2629 num_rings = 2630 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; 2631 engine->emit_breadcrumb_sz += num_rings * 6; 2632 } 2633 } else if (INTEL_GEN(dev_priv) >= 6) { 2634 engine->init_context = intel_rcs_ctx_init; 2635 engine->emit_flush = gen7_render_ring_flush; 2636 if (IS_GEN6(dev_priv)) 2637 engine->emit_flush = gen6_render_ring_flush; 2638 } else if (IS_GEN5(dev_priv)) { 2639 engine->emit_flush = gen4_render_ring_flush; 2640 } else { 2641 if (INTEL_GEN(dev_priv) < 4) 2642 engine->emit_flush = gen2_render_ring_flush; 2643 else 2644 engine->emit_flush = gen4_render_ring_flush; 2645 engine->irq_enable_mask = I915_USER_INTERRUPT; 2646 } 2647 2648 if (IS_HASWELL(dev_priv)) 2649 engine->emit_bb_start = hsw_emit_bb_start; 2650 2651 engine->init_hw = init_render_ring; 2652 engine->cleanup = render_ring_cleanup; 2653 2654 ret = intel_init_ring_buffer(engine); 2655 if (ret) 2656 return ret; 2657 2658 if (INTEL_GEN(dev_priv) >= 6) { 2659 ret = intel_engine_create_scratch(engine, 4096); 2660 if (ret) 2661 return ret; 2662 } else if (HAS_BROKEN_CS_TLB(dev_priv)) { 2663 ret = intel_engine_create_scratch(engine, I830_WA_SIZE); 2664 if (ret) 2665 return ret; 2666 } 2667 2668 return 0; 2669 } 2670 2671 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) 2672 { 2673 struct drm_i915_private *dev_priv = engine->i915; 2674 2675 intel_ring_default_vfuncs(dev_priv, engine); 2676 2677 if (INTEL_GEN(dev_priv) >= 6) { 2678 /* gen6 bsd needs a special wa for tail updates */ 2679 if (IS_GEN6(dev_priv)) 2680 engine->submit_request = gen6_bsd_submit_request; 2681 engine->emit_flush = gen6_bsd_ring_flush; 2682 if (INTEL_GEN(dev_priv) < 8) 2683 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2684 } else { 2685 engine->mmio_base = BSD_RING_BASE; 2686 engine->emit_flush = bsd_ring_flush; 2687 if (IS_GEN5(dev_priv)) 2688 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2689 else 2690 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}
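
/*
 * Illustrative sketch, not part of the driver: the emission pattern that
 * the flush and dispatch callbacks in this file all follow. A caller
 * reserves the exact number of dwords it intends to write with
 * intel_ring_begin() (which may wait for older requests to retire, or
 * wrap the ring), writes them with intel_ring_emit(), and then marks the
 * end of the emission with intel_ring_advance(). The function name below
 * is hypothetical and the body only pads the ring with MI_NOOPs; it is
 * marked __maybe_unused so it has no effect on the build.
 */
static int __maybe_unused example_emit_noops(struct drm_i915_gem_request *req,
					     int count)
{
	struct intel_ring *ring = req->ring;
	int ret;

	/* Reserve space for exactly 'count' dwords. */
	ret = intel_ring_begin(req, count);
	if (ret)
		return ret;

	/* Write the dwords that were reserved, no more and no fewer. */
	while (count--)
		intel_ring_emit(ring, MI_NOOP);

	/*
	 * Mark the end of the emission; the hardware tail register is only
	 * written when the request is submitted (see i9xx_submit_request()).
	 */
	intel_ring_advance(ring);

	return 0;
}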