/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}
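
/*
 * The free-space calculation above treats the ring as a circular buffer:
 * once TAIL has wrapped past HEAD the difference goes negative and the ring
 * size is added back.  For example (illustrative values only), with a
 * 4096-byte ring, head == 512 and tail == 3584:
 *
 *	space = 512 - 3584 = -3072, +4096 = 1024
 *	return 1024 - I915_RING_FREE_SPACE (64 at the time of writing) = 960
 *
 * The I915_RING_FREE_SPACE reservation keeps TAIL from ever catching up to
 * HEAD exactly, which would otherwise be indistinguishable from an empty
 * ring.
 */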

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
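
/*
 * To summarise the helper above: it emits two 5-dword PIPE_CONTROLs (each
 * padded with an MI_NOOP so the tail stays qword aligned) - first a
 * CS-stall + stall-at-scoreboard with no write, then one with a post-sync
 * QW write into the scratch page.  Callers such as gen6_render_ring_flush()
 * below rely on this running before any PIPE_CONTROL that flushes the write
 * caches, per the workarounds quoted above.
 */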

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring,
			PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}
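
/*
 * Note on the emission pattern used throughout this file: intel_ring_begin()
 * reserves (and may wait for) the given number of dwords, and that count
 * must match the intel_ring_emit() calls issued before intel_ring_advance()
 * publishes the new tail.  A hypothetical two-dword emission therefore
 * looks like:
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */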

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}
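
/*
 * Comparing the flush paths above: gen6/gen7 emit a single 4-dword
 * PIPE_CONTROL inline (command dword, flags, scratch address and a zero
 * data dword), while gen8 switches to the 6-dword form via
 * gen8_emit_pipe_control() and, on the invalidate path, issues an extra
 * CS-stall/scoreboard-stall PIPE_CONTROL first
 * (WaCsStallBeforeStateCacheInvalidate:bdw,chv) before the real flush.
 */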

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. It is listed only
		 * to silence the gcc switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
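
/*
 * MI_MODE above is a "masked" register: writes made with
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() carry a write-mask in the
 * upper 16 bits, so only the bits that are also set in that mask are
 * updated.  _MASKED_BIT_ENABLE(STOP_RING), for instance, expands to
 * roughly (STOP_RING << 16) | STOP_RING, which lets stop_ring() toggle
 * STOP_RING without a read-modify-write of the other bits.
 */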

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;

	ring->head = request->postfix;
	ring->last_retired_head = -1;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
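
/*
 * The WA_* macros above only record register/mask/value tuples in
 * dev_priv->workarounds via wa_add(); nothing touches the hardware at that
 * point.  The table is later replayed into the render ring by
 * intel_ring_workarounds_emit(), which wraps a single MI_LOAD_REGISTER_IMM
 * covering all entries between two EMIT_BARRIER flushes.  Note the
 * difference in flavours: WA_SET_BIT_MASKED() stores a self-masking value
 * for masked registers, whereas WA_SET_BIT()/WA_CLR_BIT() capture an
 * absolute value (including an I915_READ() at table-build time).
 */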

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ?
HDC_FENCE_DEST_SLM_DISABLE : 0)); 788 789 return 0; 790 } 791 792 static int chv_init_workarounds(struct intel_engine_cs *engine) 793 { 794 struct drm_i915_private *dev_priv = engine->i915; 795 int ret; 796 797 ret = gen8_init_workarounds(engine); 798 if (ret) 799 return ret; 800 801 /* WaDisableThreadStallDopClockGating:chv */ 802 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); 803 804 /* Improve HiZ throughput on CHV. */ 805 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); 806 807 return 0; 808 } 809 810 static int gen9_init_workarounds(struct intel_engine_cs *engine) 811 { 812 struct drm_i915_private *dev_priv = engine->i915; 813 int ret; 814 815 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ 816 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); 817 818 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */ 819 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 820 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 821 822 /* WaDisableKillLogic:bxt,skl,kbl */ 823 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 824 ECOCHK_DIS_TLB); 825 826 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */ 827 /* WaDisablePartialInstShootdown:skl,bxt,kbl */ 828 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 829 FLOW_CONTROL_ENABLE | 830 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 831 832 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ 833 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 834 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 835 836 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */ 837 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 838 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 839 GEN9_DG_MIRROR_FIX_ENABLE); 840 841 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */ 842 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 843 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 844 GEN9_RHWO_OPTIMIZATION_DISABLE); 845 /* 846 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set 847 * but we do that in per ctx batchbuffer as there is an issue 848 * with this register not getting restored on ctx restore 849 */ 850 } 851 852 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ 853 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 854 GEN9_ENABLE_GPGPU_PREEMPTION); 855 856 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */ 857 /* WaDisablePartialResolveInVc:skl,bxt,kbl */ 858 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | 859 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); 860 861 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */ 862 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 863 GEN9_CCS_TLB_PREFETCH_ENABLE); 864 865 /* WaDisableMaskBasedCammingInRCC:bxt */ 866 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 867 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 868 PIXEL_MASK_CAMMING_DISABLE); 869 870 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ 871 WA_SET_BIT_MASKED(HDC_CHICKEN0, 872 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 873 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); 874 875 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are 876 * both tied to WaForceContextSaveRestoreNonCoherent 877 * in some hsds for skl. We keep the tie for all gen9. The 878 * documentation is a bit hazy and so we want to get common behaviour, 879 * even though there is no clear evidence we would need both on kbl/bxt. 880 * This area has been source of system hangs so we play it safe 881 * and mimic the skl regardless of what bspec says. 882 * 883 * Use Force Non-Coherent whenever executing a 3D context. 
This 884 * is a workaround for a possible hang in the unlikely event 885 * a TLB invalidation occurs during a PSD flush. 886 */ 887 888 /* WaForceEnableNonCoherent:skl,bxt,kbl */ 889 WA_SET_BIT_MASKED(HDC_CHICKEN0, 890 HDC_FORCE_NON_COHERENT); 891 892 /* WaDisableHDCInvalidation:skl,bxt,kbl */ 893 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 894 BDW_DISABLE_HDC_INVALIDATION); 895 896 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ 897 if (IS_SKYLAKE(dev_priv) || 898 IS_KABYLAKE(dev_priv) || 899 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 900 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 901 GEN8_SAMPLER_POWER_BYPASS_DIS); 902 903 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */ 904 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 905 906 /* WaOCLCoherentLineFlush:skl,bxt,kbl */ 907 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | 908 GEN8_LQSC_FLUSH_COHERENT_LINES)); 909 910 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */ 911 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); 912 if (ret) 913 return ret; 914 915 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ 916 ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); 917 if (ret) 918 return ret; 919 920 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */ 921 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); 922 if (ret) 923 return ret; 924 925 return 0; 926 } 927 928 static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 929 { 930 struct drm_i915_private *dev_priv = engine->i915; 931 u8 vals[3] = { 0, 0, 0 }; 932 unsigned int i; 933 934 for (i = 0; i < 3; i++) { 935 u8 ss; 936 937 /* 938 * Only consider slices where one, and only one, subslice has 7 939 * EUs 940 */ 941 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i])) 942 continue; 943 944 /* 945 * subslice_7eu[i] != 0 (because of the check above) and 946 * ss_max == 4 (maximum number of subslices possible per slice) 947 * 948 * -> 0 <= ss <= 3; 949 */ 950 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1; 951 vals[i] = 3 - ss; 952 } 953 954 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) 955 return 0; 956 957 /* Tune IZ hashing. 
See intel_device_info_runtime_init() */ 958 WA_SET_FIELD_MASKED(GEN7_GT_MODE, 959 GEN9_IZ_HASHING_MASK(2) | 960 GEN9_IZ_HASHING_MASK(1) | 961 GEN9_IZ_HASHING_MASK(0), 962 GEN9_IZ_HASHING(2, vals[2]) | 963 GEN9_IZ_HASHING(1, vals[1]) | 964 GEN9_IZ_HASHING(0, vals[0])); 965 966 return 0; 967 } 968 969 static int skl_init_workarounds(struct intel_engine_cs *engine) 970 { 971 struct drm_i915_private *dev_priv = engine->i915; 972 int ret; 973 974 ret = gen9_init_workarounds(engine); 975 if (ret) 976 return ret; 977 978 /* 979 * Actual WA is to disable percontext preemption granularity control 980 * until D0 which is the default case so this is equivalent to 981 * !WaDisablePerCtxtPreemptionGranularityControl:skl 982 */ 983 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 984 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 985 986 /* WaEnableGapsTsvCreditFix:skl */ 987 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 988 GEN9_GAPS_TSV_CREDIT_DISABLE)); 989 990 /* WaDisableGafsUnitClkGating:skl */ 991 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 992 993 /* WaInPlaceDecompressionHang:skl */ 994 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) 995 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 996 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 997 998 /* WaDisableLSQCROPERFforOCL:skl */ 999 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1000 if (ret) 1001 return ret; 1002 1003 return skl_tune_iz_hashing(engine); 1004 } 1005 1006 static int bxt_init_workarounds(struct intel_engine_cs *engine) 1007 { 1008 struct drm_i915_private *dev_priv = engine->i915; 1009 int ret; 1010 1011 ret = gen9_init_workarounds(engine); 1012 if (ret) 1013 return ret; 1014 1015 /* WaStoreMultiplePTEenable:bxt */ 1016 /* This is a requirement according to Hardware specification */ 1017 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 1018 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1019 1020 /* WaSetClckGatingDisableMedia:bxt */ 1021 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 1022 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1023 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1024 } 1025 1026 /* WaDisableThreadStallDopClockGating:bxt */ 1027 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 1028 STALL_DOP_GATING_DISABLE); 1029 1030 /* WaDisablePooledEuLoadBalancingFix:bxt */ 1031 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { 1032 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, 1033 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); 1034 } 1035 1036 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1037 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { 1038 WA_SET_BIT_MASKED( 1039 GEN7_HALF_SLICE_CHICKEN1, 1040 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1041 } 1042 1043 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */ 1044 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ 1045 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1046 /* WaDisableLSQCROPERFforOCL:bxt */ 1047 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 1048 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); 1049 if (ret) 1050 return ret; 1051 1052 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1053 if (ret) 1054 return ret; 1055 } 1056 1057 /* WaProgramL3SqcReg1DefaultForPerf:bxt */ 1058 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) 1059 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1060 L3_HIGH_PRIO_CREDITS(2)); 1061 1062 /* WaToEnableHwFixForPushConstHWBug:bxt */ 1063 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1064 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1065 
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1066 1067 /* WaInPlaceDecompressionHang:bxt */ 1068 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1069 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1070 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1071 1072 return 0; 1073 } 1074 1075 static int kbl_init_workarounds(struct intel_engine_cs *engine) 1076 { 1077 struct drm_i915_private *dev_priv = engine->i915; 1078 int ret; 1079 1080 ret = gen9_init_workarounds(engine); 1081 if (ret) 1082 return ret; 1083 1084 /* WaEnableGapsTsvCreditFix:kbl */ 1085 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1086 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1087 1088 /* WaDisableDynamicCreditSharing:kbl */ 1089 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 1090 WA_SET_BIT(GAMT_CHKN_BIT_REG, 1091 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 1092 1093 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ 1094 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1096 HDC_FENCE_DEST_SLM_DISABLE); 1097 1098 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1099 * involving this register should also be added to WA batch as required. 1100 */ 1101 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 1102 /* WaDisableLSQCROPERFforOCL:kbl */ 1103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1104 GEN8_LQSC_RO_PERF_DIS); 1105 1106 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1107 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1108 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1109 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1110 1111 /* WaDisableGafsUnitClkGating:kbl */ 1112 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1113 1114 /* WaDisableSbeCacheDispatchPortSharing:kbl */ 1115 WA_SET_BIT_MASKED( 1116 GEN7_HALF_SLICE_CHICKEN1, 1117 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1118 1119 /* WaInPlaceDecompressionHang:kbl */ 1120 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1121 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1122 1123 /* WaDisableLSQCROPERFforOCL:kbl */ 1124 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1125 if (ret) 1126 return ret; 1127 1128 return 0; 1129 } 1130 1131 int init_workarounds_ring(struct intel_engine_cs *engine) 1132 { 1133 struct drm_i915_private *dev_priv = engine->i915; 1134 1135 WARN_ON(engine->id != RCS); 1136 1137 dev_priv->workarounds.count = 0; 1138 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1139 1140 if (IS_BROADWELL(dev_priv)) 1141 return bdw_init_workarounds(engine); 1142 1143 if (IS_CHERRYVIEW(dev_priv)) 1144 return chv_init_workarounds(engine); 1145 1146 if (IS_SKYLAKE(dev_priv)) 1147 return skl_init_workarounds(engine); 1148 1149 if (IS_BROXTON(dev_priv)) 1150 return bxt_init_workarounds(engine); 1151 1152 if (IS_KABYLAKE(dev_priv)) 1153 return kbl_init_workarounds(engine); 1154 1155 return 0; 1156 } 1157 1158 static int init_render_ring(struct intel_engine_cs *engine) 1159 { 1160 struct drm_i915_private *dev_priv = engine->i915; 1161 int ret = init_ring_common(engine); 1162 if (ret) 1163 return ret; 1164 1165 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 1166 if (IS_GEN(dev_priv, 4, 6)) 1167 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 1168 1169 /* We need to disable the AsyncFlip performance optimisations in order 1170 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 1171 * programmed to '1' on all products. 
1172 * 1173 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 1174 */ 1175 if (IS_GEN(dev_priv, 6, 7)) 1176 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1177 1178 /* Required for the hardware to program scanline values for waiting */ 1179 /* WaEnableFlushTlbInvalidationMode:snb */ 1180 if (IS_GEN6(dev_priv)) 1181 I915_WRITE(GFX_MODE, 1182 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 1183 1184 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 1185 if (IS_GEN7(dev_priv)) 1186 I915_WRITE(GFX_MODE_GEN7, 1187 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 1188 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 1189 1190 if (IS_GEN6(dev_priv)) { 1191 /* From the Sandybridge PRM, volume 1 part 3, page 24: 1192 * "If this bit is set, STCunit will have LRA as replacement 1193 * policy. [...] This bit must be reset. LRA replacement 1194 * policy is not supported." 1195 */ 1196 I915_WRITE(CACHE_MODE_0, 1197 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 1198 } 1199 1200 if (IS_GEN(dev_priv, 6, 7)) 1201 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1202 1203 if (INTEL_INFO(dev_priv)->gen >= 6) 1204 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1205 1206 return init_workarounds_ring(engine); 1207 } 1208 1209 static void render_ring_cleanup(struct intel_engine_cs *engine) 1210 { 1211 struct drm_i915_private *dev_priv = engine->i915; 1212 1213 i915_vma_unpin_and_release(&dev_priv->semaphore); 1214 } 1215 1216 static int gen8_rcs_signal(struct drm_i915_gem_request *req) 1217 { 1218 struct intel_ring *ring = req->ring; 1219 struct drm_i915_private *dev_priv = req->i915; 1220 struct intel_engine_cs *waiter; 1221 enum intel_engine_id id; 1222 int ret, num_rings; 1223 1224 num_rings = INTEL_INFO(dev_priv)->num_rings; 1225 ret = intel_ring_begin(req, (num_rings-1) * 8); 1226 if (ret) 1227 return ret; 1228 1229 for_each_engine(waiter, dev_priv, id) { 1230 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; 1231 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1232 continue; 1233 1234 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1235 intel_ring_emit(ring, 1236 PIPE_CONTROL_GLOBAL_GTT_IVB | 1237 PIPE_CONTROL_QW_WRITE | 1238 PIPE_CONTROL_CS_STALL); 1239 intel_ring_emit(ring, lower_32_bits(gtt_offset)); 1240 intel_ring_emit(ring, upper_32_bits(gtt_offset)); 1241 intel_ring_emit(ring, req->fence.seqno); 1242 intel_ring_emit(ring, 0); 1243 intel_ring_emit(ring, 1244 MI_SEMAPHORE_SIGNAL | 1245 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1246 intel_ring_emit(ring, 0); 1247 } 1248 intel_ring_advance(ring); 1249 1250 return 0; 1251 } 1252 1253 static int gen8_xcs_signal(struct drm_i915_gem_request *req) 1254 { 1255 struct intel_ring *ring = req->ring; 1256 struct drm_i915_private *dev_priv = req->i915; 1257 struct intel_engine_cs *waiter; 1258 enum intel_engine_id id; 1259 int ret, num_rings; 1260 1261 num_rings = INTEL_INFO(dev_priv)->num_rings; 1262 ret = intel_ring_begin(req, (num_rings-1) * 6); 1263 if (ret) 1264 return ret; 1265 1266 for_each_engine(waiter, dev_priv, id) { 1267 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; 1268 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1269 continue; 1270 1271 intel_ring_emit(ring, 1272 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1273 intel_ring_emit(ring, 1274 lower_32_bits(gtt_offset) | 1275 MI_FLUSH_DW_USE_GTT); 1276 intel_ring_emit(ring, upper_32_bits(gtt_offset)); 1277 intel_ring_emit(ring, req->fence.seqno); 1278 intel_ring_emit(ring, 1279 MI_SEMAPHORE_SIGNAL | 1280 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1281 intel_ring_emit(ring, 0); 
1282 } 1283 intel_ring_advance(ring); 1284 1285 return 0; 1286 } 1287 1288 static int gen6_signal(struct drm_i915_gem_request *req) 1289 { 1290 struct intel_ring *ring = req->ring; 1291 struct drm_i915_private *dev_priv = req->i915; 1292 struct intel_engine_cs *engine; 1293 enum intel_engine_id id; 1294 int ret, num_rings; 1295 1296 num_rings = INTEL_INFO(dev_priv)->num_rings; 1297 ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2)); 1298 if (ret) 1299 return ret; 1300 1301 for_each_engine(engine, dev_priv, id) { 1302 i915_reg_t mbox_reg; 1303 1304 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) 1305 continue; 1306 1307 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; 1308 if (i915_mmio_reg_valid(mbox_reg)) { 1309 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 1310 intel_ring_emit_reg(ring, mbox_reg); 1311 intel_ring_emit(ring, req->fence.seqno); 1312 } 1313 } 1314 1315 /* If num_dwords was rounded, make sure the tail pointer is correct */ 1316 if (num_rings % 2 == 0) 1317 intel_ring_emit(ring, MI_NOOP); 1318 intel_ring_advance(ring); 1319 1320 return 0; 1321 } 1322 1323 static void i9xx_submit_request(struct drm_i915_gem_request *request) 1324 { 1325 struct drm_i915_private *dev_priv = request->i915; 1326 1327 I915_WRITE_TAIL(request->engine, 1328 intel_ring_offset(request->ring, request->tail)); 1329 } 1330 1331 static int i9xx_emit_request(struct drm_i915_gem_request *req) 1332 { 1333 struct intel_ring *ring = req->ring; 1334 int ret; 1335 1336 ret = intel_ring_begin(req, 4); 1337 if (ret) 1338 return ret; 1339 1340 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 1341 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1342 intel_ring_emit(ring, req->fence.seqno); 1343 intel_ring_emit(ring, MI_USER_INTERRUPT); 1344 intel_ring_advance(ring); 1345 1346 req->tail = ring->tail; 1347 1348 return 0; 1349 } 1350 1351 /** 1352 * gen6_sema_emit_request - Update the semaphore mailbox registers 1353 * 1354 * @request - request to write to the ring 1355 * 1356 * Update the mailbox registers in the *other* rings with the current seqno. 1357 * This acts like a signal in the canonical semaphore. 1358 */ 1359 static int gen6_sema_emit_request(struct drm_i915_gem_request *req) 1360 { 1361 int ret; 1362 1363 ret = req->engine->semaphore.signal(req); 1364 if (ret) 1365 return ret; 1366 1367 return i9xx_emit_request(req); 1368 } 1369 1370 static int gen8_render_emit_request(struct drm_i915_gem_request *req) 1371 { 1372 struct intel_engine_cs *engine = req->engine; 1373 struct intel_ring *ring = req->ring; 1374 int ret; 1375 1376 if (engine->semaphore.signal) { 1377 ret = engine->semaphore.signal(req); 1378 if (ret) 1379 return ret; 1380 } 1381 1382 ret = intel_ring_begin(req, 8); 1383 if (ret) 1384 return ret; 1385 1386 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1387 intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB | 1388 PIPE_CONTROL_CS_STALL | 1389 PIPE_CONTROL_QW_WRITE)); 1390 intel_ring_emit(ring, intel_hws_seqno_address(engine)); 1391 intel_ring_emit(ring, 0); 1392 intel_ring_emit(ring, i915_gem_request_get_seqno(req)); 1393 /* We're thrashing one dword of HWS. 
*/ 1394 intel_ring_emit(ring, 0); 1395 intel_ring_emit(ring, MI_USER_INTERRUPT); 1396 intel_ring_emit(ring, MI_NOOP); 1397 intel_ring_advance(ring); 1398 1399 req->tail = ring->tail; 1400 1401 return 0; 1402 } 1403 1404 /** 1405 * intel_ring_sync - sync the waiter to the signaller on seqno 1406 * 1407 * @waiter - ring that is waiting 1408 * @signaller - ring which has, or will signal 1409 * @seqno - seqno which the waiter will block on 1410 */ 1411 1412 static int 1413 gen8_ring_sync_to(struct drm_i915_gem_request *req, 1414 struct drm_i915_gem_request *signal) 1415 { 1416 struct intel_ring *ring = req->ring; 1417 struct drm_i915_private *dev_priv = req->i915; 1418 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id); 1419 struct i915_hw_ppgtt *ppgtt; 1420 int ret; 1421 1422 ret = intel_ring_begin(req, 4); 1423 if (ret) 1424 return ret; 1425 1426 intel_ring_emit(ring, 1427 MI_SEMAPHORE_WAIT | 1428 MI_SEMAPHORE_GLOBAL_GTT | 1429 MI_SEMAPHORE_SAD_GTE_SDD); 1430 intel_ring_emit(ring, signal->fence.seqno); 1431 intel_ring_emit(ring, lower_32_bits(offset)); 1432 intel_ring_emit(ring, upper_32_bits(offset)); 1433 intel_ring_advance(ring); 1434 1435 /* When the !RCS engines idle waiting upon a semaphore, they lose their 1436 * pagetables and we must reload them before executing the batch. 1437 * We do this on the i915_switch_context() following the wait and 1438 * before the dispatch. 1439 */ 1440 ppgtt = req->ctx->ppgtt; 1441 if (ppgtt && req->engine->id != RCS) 1442 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine); 1443 return 0; 1444 } 1445 1446 static int 1447 gen6_ring_sync_to(struct drm_i915_gem_request *req, 1448 struct drm_i915_gem_request *signal) 1449 { 1450 struct intel_ring *ring = req->ring; 1451 u32 dw1 = MI_SEMAPHORE_MBOX | 1452 MI_SEMAPHORE_COMPARE | 1453 MI_SEMAPHORE_REGISTER; 1454 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; 1455 int ret; 1456 1457 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 1458 1459 ret = intel_ring_begin(req, 4); 1460 if (ret) 1461 return ret; 1462 1463 intel_ring_emit(ring, dw1 | wait_mbox); 1464 /* Throughout all of the GEM code, seqno passed implies our current 1465 * seqno is >= the last seqno executed. However for hardware the 1466 * comparison is strictly greater than. 1467 */ 1468 intel_ring_emit(ring, signal->fence.seqno - 1); 1469 intel_ring_emit(ring, 0); 1470 intel_ring_emit(ring, MI_NOOP); 1471 intel_ring_advance(ring); 1472 1473 return 0; 1474 } 1475 1476 static void 1477 gen5_seqno_barrier(struct intel_engine_cs *engine) 1478 { 1479 /* MI_STORE are internally buffered by the GPU and not flushed 1480 * either by MI_FLUSH or SyncFlush or any other combination of 1481 * MI commands. 1482 * 1483 * "Only the submission of the store operation is guaranteed. 1484 * The write result will be complete (coherent) some time later 1485 * (this is practically a finite period but there is no guaranteed 1486 * latency)." 1487 * 1488 * Empirically, we observe that we need a delay of at least 75us to 1489 * be sure that the seqno write is visible by the CPU. 1490 */ 1491 usleep_range(125, 250); 1492 } 1493 1494 static void 1495 gen6_seqno_barrier(struct intel_engine_cs *engine) 1496 { 1497 struct drm_i915_private *dev_priv = engine->i915; 1498 1499 /* Workaround to force correct ordering between irq and seqno writes on 1500 * ivb (and maybe also on snb) by reading from a CS register (like 1501 * ACTHD) before reading the status page. 
1502 * 1503 * Note that this effectively stalls the read by the time it takes to 1504 * do a memory transaction, which more or less ensures that the write 1505 * from the GPU has sufficient time to invalidate the CPU cacheline. 1506 * Alternatively we could delay the interrupt from the CS ring to give 1507 * the write time to land, but that would incur a delay after every 1508 * batch i.e. much more frequent than a delay when waiting for the 1509 * interrupt (with the same net latency). 1510 * 1511 * Also note that to prevent whole machine hangs on gen7, we have to 1512 * take the spinlock to guard against concurrent cacheline access. 1513 */ 1514 spin_lock_irq(&dev_priv->uncore.lock); 1515 POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); 1516 spin_unlock_irq(&dev_priv->uncore.lock); 1517 } 1518 1519 static void 1520 gen5_irq_enable(struct intel_engine_cs *engine) 1521 { 1522 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); 1523 } 1524 1525 static void 1526 gen5_irq_disable(struct intel_engine_cs *engine) 1527 { 1528 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); 1529 } 1530 1531 static void 1532 i9xx_irq_enable(struct intel_engine_cs *engine) 1533 { 1534 struct drm_i915_private *dev_priv = engine->i915; 1535 1536 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1537 I915_WRITE(IMR, dev_priv->irq_mask); 1538 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1539 } 1540 1541 static void 1542 i9xx_irq_disable(struct intel_engine_cs *engine) 1543 { 1544 struct drm_i915_private *dev_priv = engine->i915; 1545 1546 dev_priv->irq_mask |= engine->irq_enable_mask; 1547 I915_WRITE(IMR, dev_priv->irq_mask); 1548 } 1549 1550 static void 1551 i8xx_irq_enable(struct intel_engine_cs *engine) 1552 { 1553 struct drm_i915_private *dev_priv = engine->i915; 1554 1555 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1556 I915_WRITE16(IMR, dev_priv->irq_mask); 1557 POSTING_READ16(RING_IMR(engine->mmio_base)); 1558 } 1559 1560 static void 1561 i8xx_irq_disable(struct intel_engine_cs *engine) 1562 { 1563 struct drm_i915_private *dev_priv = engine->i915; 1564 1565 dev_priv->irq_mask |= engine->irq_enable_mask; 1566 I915_WRITE16(IMR, dev_priv->irq_mask); 1567 } 1568 1569 static int 1570 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1571 { 1572 struct intel_ring *ring = req->ring; 1573 int ret; 1574 1575 ret = intel_ring_begin(req, 2); 1576 if (ret) 1577 return ret; 1578 1579 intel_ring_emit(ring, MI_FLUSH); 1580 intel_ring_emit(ring, MI_NOOP); 1581 intel_ring_advance(ring); 1582 return 0; 1583 } 1584 1585 static void 1586 gen6_irq_enable(struct intel_engine_cs *engine) 1587 { 1588 struct drm_i915_private *dev_priv = engine->i915; 1589 1590 I915_WRITE_IMR(engine, 1591 ~(engine->irq_enable_mask | 1592 engine->irq_keep_mask)); 1593 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1594 } 1595 1596 static void 1597 gen6_irq_disable(struct intel_engine_cs *engine) 1598 { 1599 struct drm_i915_private *dev_priv = engine->i915; 1600 1601 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1602 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1603 } 1604 1605 static void 1606 hsw_vebox_irq_enable(struct intel_engine_cs *engine) 1607 { 1608 struct drm_i915_private *dev_priv = engine->i915; 1609 1610 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1611 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); 1612 } 1613 1614 static void 1615 hsw_vebox_irq_disable(struct intel_engine_cs *engine) 1616 { 1617 struct drm_i915_private *dev_priv = engine->i915; 1618 1619 I915_WRITE_IMR(engine, 
~0); 1620 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); 1621 } 1622 1623 static void 1624 gen8_irq_enable(struct intel_engine_cs *engine) 1625 { 1626 struct drm_i915_private *dev_priv = engine->i915; 1627 1628 I915_WRITE_IMR(engine, 1629 ~(engine->irq_enable_mask | 1630 engine->irq_keep_mask)); 1631 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1632 } 1633 1634 static void 1635 gen8_irq_disable(struct intel_engine_cs *engine) 1636 { 1637 struct drm_i915_private *dev_priv = engine->i915; 1638 1639 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1640 } 1641 1642 static int 1643 i965_emit_bb_start(struct drm_i915_gem_request *req, 1644 u64 offset, u32 length, 1645 unsigned int dispatch_flags) 1646 { 1647 struct intel_ring *ring = req->ring; 1648 int ret; 1649 1650 ret = intel_ring_begin(req, 2); 1651 if (ret) 1652 return ret; 1653 1654 intel_ring_emit(ring, 1655 MI_BATCH_BUFFER_START | 1656 MI_BATCH_GTT | 1657 (dispatch_flags & I915_DISPATCH_SECURE ? 1658 0 : MI_BATCH_NON_SECURE_I965)); 1659 intel_ring_emit(ring, offset); 1660 intel_ring_advance(ring); 1661 1662 return 0; 1663 } 1664 1665 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ 1666 #define I830_BATCH_LIMIT (256*1024) 1667 #define I830_TLB_ENTRIES (2) 1668 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 1669 static int 1670 i830_emit_bb_start(struct drm_i915_gem_request *req, 1671 u64 offset, u32 len, 1672 unsigned int dispatch_flags) 1673 { 1674 struct intel_ring *ring = req->ring; 1675 u32 cs_offset = i915_ggtt_offset(req->engine->scratch); 1676 int ret; 1677 1678 ret = intel_ring_begin(req, 6); 1679 if (ret) 1680 return ret; 1681 1682 /* Evict the invalid PTE TLBs */ 1683 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); 1684 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); 1685 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ 1686 intel_ring_emit(ring, cs_offset); 1687 intel_ring_emit(ring, 0xdeadbeef); 1688 intel_ring_emit(ring, MI_NOOP); 1689 intel_ring_advance(ring); 1690 1691 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 1692 if (len > I830_BATCH_LIMIT) 1693 return -ENOSPC; 1694 1695 ret = intel_ring_begin(req, 6 + 2); 1696 if (ret) 1697 return ret; 1698 1699 /* Blit the batch (which has now all relocs applied) to the 1700 * stable batch scratch bo area (so that the CS never 1701 * stumbles over its tlb invalidation bug) ... 1702 */ 1703 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); 1704 intel_ring_emit(ring, 1705 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); 1706 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); 1707 intel_ring_emit(ring, cs_offset); 1708 intel_ring_emit(ring, 4096); 1709 intel_ring_emit(ring, offset); 1710 1711 intel_ring_emit(ring, MI_FLUSH); 1712 intel_ring_emit(ring, MI_NOOP); 1713 intel_ring_advance(ring); 1714 1715 /* ... and execute it. */ 1716 offset = cs_offset; 1717 } 1718 1719 ret = intel_ring_begin(req, 2); 1720 if (ret) 1721 return ret; 1722 1723 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1724 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
1725 0 : MI_BATCH_NON_SECURE)); 1726 intel_ring_advance(ring); 1727 1728 return 0; 1729 } 1730 1731 static int 1732 i915_emit_bb_start(struct drm_i915_gem_request *req, 1733 u64 offset, u32 len, 1734 unsigned int dispatch_flags) 1735 { 1736 struct intel_ring *ring = req->ring; 1737 int ret; 1738 1739 ret = intel_ring_begin(req, 2); 1740 if (ret) 1741 return ret; 1742 1743 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1744 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 1745 0 : MI_BATCH_NON_SECURE)); 1746 intel_ring_advance(ring); 1747 1748 return 0; 1749 } 1750 1751 static void cleanup_phys_status_page(struct intel_engine_cs *engine) 1752 { 1753 struct drm_i915_private *dev_priv = engine->i915; 1754 1755 if (!dev_priv->status_page_dmah) 1756 return; 1757 1758 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); 1759 engine->status_page.page_addr = NULL; 1760 } 1761 1762 static void cleanup_status_page(struct intel_engine_cs *engine) 1763 { 1764 struct i915_vma *vma; 1765 1766 vma = fetch_and_zero(&engine->status_page.vma); 1767 if (!vma) 1768 return; 1769 1770 i915_vma_unpin(vma); 1771 i915_gem_object_unpin_map(vma->obj); 1772 i915_vma_put(vma); 1773 } 1774 1775 static int init_status_page(struct intel_engine_cs *engine) 1776 { 1777 struct drm_i915_gem_object *obj; 1778 struct i915_vma *vma; 1779 unsigned int flags; 1780 int ret; 1781 1782 obj = i915_gem_object_create(&engine->i915->drm, 4096); 1783 if (IS_ERR(obj)) { 1784 DRM_ERROR("Failed to allocate status page\n"); 1785 return PTR_ERR(obj); 1786 } 1787 1788 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1789 if (ret) 1790 goto err; 1791 1792 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); 1793 if (IS_ERR(vma)) { 1794 ret = PTR_ERR(vma); 1795 goto err; 1796 } 1797 1798 flags = PIN_GLOBAL; 1799 if (!HAS_LLC(engine->i915)) 1800 /* On g33, we cannot place HWS above 256MiB, so 1801 * restrict its pinning to the low mappable arena. 1802 * Though this restriction is not documented for 1803 * gen4, gen5, or byt, they also behave similarly 1804 * and hang if the HWS is placed at the top of the 1805 * GTT. To generalise, it appears that all !llc 1806 * platforms have issues with us placing the HWS 1807 * above the mappable region (even though we never 1808 * actualy map it). 1809 */ 1810 flags |= PIN_MAPPABLE; 1811 ret = i915_vma_pin(vma, 0, 4096, flags); 1812 if (ret) 1813 goto err; 1814 1815 engine->status_page.vma = vma; 1816 engine->status_page.ggtt_offset = i915_ggtt_offset(vma); 1817 engine->status_page.page_addr = 1818 i915_gem_object_pin_map(obj, I915_MAP_WB); 1819 1820 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1821 engine->name, i915_ggtt_offset(vma)); 1822 return 0; 1823 1824 err: 1825 i915_gem_object_put(obj); 1826 return ret; 1827 } 1828 1829 static int init_phys_status_page(struct intel_engine_cs *engine) 1830 { 1831 struct drm_i915_private *dev_priv = engine->i915; 1832 1833 dev_priv->status_page_dmah = 1834 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); 1835 if (!dev_priv->status_page_dmah) 1836 return -ENOMEM; 1837 1838 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1839 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 1840 1841 return 0; 1842 } 1843 1844 int intel_ring_pin(struct intel_ring *ring) 1845 { 1846 /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
*/ 1847 unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096; 1848 enum i915_map_type map; 1849 struct i915_vma *vma = ring->vma; 1850 void *addr; 1851 int ret; 1852 1853 GEM_BUG_ON(ring->vaddr); 1854 1855 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC; 1856 1857 if (vma->obj->stolen) 1858 flags |= PIN_MAPPABLE; 1859 1860 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1861 if (flags & PIN_MAPPABLE || map == I915_MAP_WC) 1862 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); 1863 else 1864 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); 1865 if (unlikely(ret)) 1866 return ret; 1867 } 1868 1869 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); 1870 if (unlikely(ret)) 1871 return ret; 1872 1873 if (i915_vma_is_map_and_fenceable(vma)) 1874 addr = (void __force *)i915_vma_pin_iomap(vma); 1875 else 1876 addr = i915_gem_object_pin_map(vma->obj, map); 1877 if (IS_ERR(addr)) 1878 goto err; 1879 1880 ring->vaddr = addr; 1881 return 0; 1882 1883 err: 1884 i915_vma_unpin(vma); 1885 return PTR_ERR(addr); 1886 } 1887 1888 void intel_ring_unpin(struct intel_ring *ring) 1889 { 1890 GEM_BUG_ON(!ring->vma); 1891 GEM_BUG_ON(!ring->vaddr); 1892 1893 if (i915_vma_is_map_and_fenceable(ring->vma)) 1894 i915_vma_unpin_iomap(ring->vma); 1895 else 1896 i915_gem_object_unpin_map(ring->vma->obj); 1897 ring->vaddr = NULL; 1898 1899 i915_vma_unpin(ring->vma); 1900 } 1901 1902 static struct i915_vma * 1903 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) 1904 { 1905 struct drm_i915_gem_object *obj; 1906 struct i915_vma *vma; 1907 1908 obj = i915_gem_object_create_stolen(&dev_priv->drm, size); 1909 if (!obj) 1910 obj = i915_gem_object_create(&dev_priv->drm, size); 1911 if (IS_ERR(obj)) 1912 return ERR_CAST(obj); 1913 1914 /* mark ring buffers as read-only from GPU side by default */ 1915 obj->gt_ro = 1; 1916 1917 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); 1918 if (IS_ERR(vma)) 1919 goto err; 1920 1921 return vma; 1922 1923 err: 1924 i915_gem_object_put(obj); 1925 return vma; 1926 } 1927 1928 struct intel_ring * 1929 intel_engine_create_ring(struct intel_engine_cs *engine, int size) 1930 { 1931 struct intel_ring *ring; 1932 struct i915_vma *vma; 1933 1934 GEM_BUG_ON(!is_power_of_2(size)); 1935 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); 1936 1937 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1938 if (!ring) 1939 return ERR_PTR(-ENOMEM); 1940 1941 ring->engine = engine; 1942 1943 INIT_LIST_HEAD(&ring->request_list); 1944 1945 ring->size = size; 1946 /* Workaround an erratum on the i830 which causes a hang if 1947 * the TAIL pointer points to within the last 2 cachelines 1948 * of the buffer. 
1949 */ 1950 ring->effective_size = size; 1951 if (IS_I830(engine->i915) || IS_845G(engine->i915)) 1952 ring->effective_size -= 2 * CACHELINE_BYTES; 1953 1954 ring->last_retired_head = -1; 1955 intel_ring_update_space(ring); 1956 1957 vma = intel_ring_create_vma(engine->i915, size); 1958 if (IS_ERR(vma)) { 1959 kfree(ring); 1960 return ERR_CAST(vma); 1961 } 1962 ring->vma = vma; 1963 1964 return ring; 1965 } 1966 1967 void 1968 intel_ring_free(struct intel_ring *ring) 1969 { 1970 i915_vma_put(ring->vma); 1971 kfree(ring); 1972 } 1973 1974 static int intel_ring_context_pin(struct i915_gem_context *ctx, 1975 struct intel_engine_cs *engine) 1976 { 1977 struct intel_context *ce = &ctx->engine[engine->id]; 1978 int ret; 1979 1980 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1981 1982 if (ce->pin_count++) 1983 return 0; 1984 1985 if (ce->state) { 1986 ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false); 1987 if (ret) 1988 goto error; 1989 1990 ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment, 1991 PIN_GLOBAL | PIN_HIGH); 1992 if (ret) 1993 goto error; 1994 } 1995 1996 /* The kernel context is only used as a placeholder for flushing the 1997 * active context. It is never used for submitting user rendering and 1998 * as such never requires the golden render context, and so we can skip 1999 * emitting it when we switch to the kernel context. This is required 2000 * as during eviction we cannot allocate and pin the renderstate in 2001 * order to initialise the context. 2002 */ 2003 if (ctx == ctx->i915->kernel_context) 2004 ce->initialised = true; 2005 2006 i915_gem_context_get(ctx); 2007 return 0; 2008 2009 error: 2010 ce->pin_count = 0; 2011 return ret; 2012 } 2013 2014 static void intel_ring_context_unpin(struct i915_gem_context *ctx, 2015 struct intel_engine_cs *engine) 2016 { 2017 struct intel_context *ce = &ctx->engine[engine->id]; 2018 2019 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 2020 2021 if (--ce->pin_count) 2022 return; 2023 2024 if (ce->state) 2025 i915_vma_unpin(ce->state); 2026 2027 i915_gem_context_put(ctx); 2028 } 2029 2030 static int intel_init_ring_buffer(struct intel_engine_cs *engine) 2031 { 2032 struct drm_i915_private *dev_priv = engine->i915; 2033 struct intel_ring *ring; 2034 int ret; 2035 2036 WARN_ON(engine->buffer); 2037 2038 intel_engine_setup_common(engine); 2039 2040 memset(engine->semaphore.sync_seqno, 0, 2041 sizeof(engine->semaphore.sync_seqno)); 2042 2043 ret = intel_engine_init_common(engine); 2044 if (ret) 2045 goto error; 2046 2047 /* We may need to do things with the shrinker which 2048 * require us to immediately switch back to the default 2049 * context. This can cause a problem as pinning the 2050 * default context also requires GTT space which may not 2051 * be available. To avoid this we always pin the default 2052 * context. 
2053 */ 2054 ret = intel_ring_context_pin(dev_priv->kernel_context, engine); 2055 if (ret) 2056 goto error; 2057 2058 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); 2059 if (IS_ERR(ring)) { 2060 ret = PTR_ERR(ring); 2061 goto error; 2062 } 2063 2064 if (HWS_NEEDS_PHYSICAL(dev_priv)) { 2065 WARN_ON(engine->id != RCS); 2066 ret = init_phys_status_page(engine); 2067 if (ret) 2068 goto error; 2069 } else { 2070 ret = init_status_page(engine); 2071 if (ret) 2072 goto error; 2073 } 2074 2075 ret = intel_ring_pin(ring); 2076 if (ret) { 2077 intel_ring_free(ring); 2078 goto error; 2079 } 2080 engine->buffer = ring; 2081 2082 return 0; 2083 2084 error: 2085 intel_engine_cleanup(engine); 2086 return ret; 2087 } 2088 2089 void intel_engine_cleanup(struct intel_engine_cs *engine) 2090 { 2091 struct drm_i915_private *dev_priv; 2092 2093 dev_priv = engine->i915; 2094 2095 if (engine->buffer) { 2096 WARN_ON(INTEL_GEN(dev_priv) > 2 && 2097 (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2098 2099 intel_ring_unpin(engine->buffer); 2100 intel_ring_free(engine->buffer); 2101 engine->buffer = NULL; 2102 } 2103 2104 if (engine->cleanup) 2105 engine->cleanup(engine); 2106 2107 if (HWS_NEEDS_PHYSICAL(dev_priv)) { 2108 WARN_ON(engine->id != RCS); 2109 cleanup_phys_status_page(engine); 2110 } else { 2111 cleanup_status_page(engine); 2112 } 2113 2114 intel_engine_cleanup_common(engine); 2115 2116 intel_ring_context_unpin(dev_priv->kernel_context, engine); 2117 2118 engine->i915 = NULL; 2119 dev_priv->engine[engine->id] = NULL; 2120 kfree(engine); 2121 } 2122 2123 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) 2124 { 2125 struct intel_engine_cs *engine; 2126 enum intel_engine_id id; 2127 2128 for_each_engine(engine, dev_priv, id) { 2129 engine->buffer->head = engine->buffer->tail; 2130 engine->buffer->last_retired_head = -1; 2131 } 2132 } 2133 2134 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2135 { 2136 int ret; 2137 2138 /* Flush enough space to reduce the likelihood of waiting after 2139 * we start building the request - in which case we will just 2140 * have to repeat work. 2141 */ 2142 request->reserved_space += LEGACY_REQUEST_SIZE; 2143 2144 request->ring = request->engine->buffer; 2145 2146 ret = intel_ring_begin(request, 0); 2147 if (ret) 2148 return ret; 2149 2150 request->reserved_space -= LEGACY_REQUEST_SIZE; 2151 return 0; 2152 } 2153 2154 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2155 { 2156 struct intel_ring *ring = req->ring; 2157 struct drm_i915_gem_request *target; 2158 int ret; 2159 2160 intel_ring_update_space(ring); 2161 if (ring->space >= bytes) 2162 return 0; 2163 2164 /* 2165 * Space is reserved in the ringbuffer for finalising the request, 2166 * as that cannot be allowed to fail. During request finalisation, 2167 * reserved_space is set to 0 to stop the overallocation and the 2168 * assumption is that then we never need to wait (which has the 2169 * risk of failing with EINTR). 2170 * 2171 * See also i915_gem_request_alloc() and i915_add_request(). 2172 */ 2173 GEM_BUG_ON(!req->reserved_space); 2174 2175 list_for_each_entry(target, &ring->request_list, ring_link) { 2176 unsigned space; 2177 2178 /* Would completion of this request free enough space? 
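 *
 * As a rough worked example, treating target->postfix as the HEAD we
 * would have once that request is retired: with a 16 KiB ring,
 * tail = 12288 and postfix = 4096, the distance from tail forward to
 * postfix is 4096 - 12288 + 16384 = 8192 bytes, minus the small
 * reserve the driver always keeps back, so retiring up to that
 * request would satisfy a wait of up to roughly 8 KiB.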
*/ 2179 space = __intel_ring_space(target->postfix, ring->tail, 2180 ring->size); 2181 if (space >= bytes) 2182 break; 2183 } 2184 2185 if (WARN_ON(&target->ring_link == &ring->request_list)) 2186 return -ENOSPC; 2187 2188 ret = i915_wait_request(target, 2189 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 2190 NULL, NO_WAITBOOST); 2191 if (ret) 2192 return ret; 2193 2194 i915_gem_request_retire_upto(target); 2195 2196 intel_ring_update_space(ring); 2197 GEM_BUG_ON(ring->space < bytes); 2198 return 0; 2199 } 2200 2201 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2202 { 2203 struct intel_ring *ring = req->ring; 2204 int remain_actual = ring->size - ring->tail; 2205 int remain_usable = ring->effective_size - ring->tail; 2206 int bytes = num_dwords * sizeof(u32); 2207 int total_bytes, wait_bytes; 2208 bool need_wrap = false; 2209 2210 total_bytes = bytes + req->reserved_space; 2211 2212 if (unlikely(bytes > remain_usable)) { 2213 /* 2214 * Not enough space for the basic request. So need to flush 2215 * out the remainder and then wait for base + reserved. 2216 */ 2217 wait_bytes = remain_actual + total_bytes; 2218 need_wrap = true; 2219 } else if (unlikely(total_bytes > remain_usable)) { 2220 /* 2221 * The base request will fit but the reserved space 2222 * falls off the end. So we don't need an immediate wrap 2223 * and only need to effectively wait for the reserved 2224 * size space from the start of ringbuffer. 2225 */ 2226 wait_bytes = remain_actual + req->reserved_space; 2227 } else { 2228 /* No wrapping required, just waiting. */ 2229 wait_bytes = total_bytes; 2230 } 2231 2232 if (wait_bytes > ring->space) { 2233 int ret = wait_for_space(req, wait_bytes); 2234 if (unlikely(ret)) 2235 return ret; 2236 } 2237 2238 if (unlikely(need_wrap)) { 2239 GEM_BUG_ON(remain_actual > ring->space); 2240 GEM_BUG_ON(ring->tail + remain_actual > ring->size); 2241 2242 /* Fill the tail with MI_NOOP */ 2243 memset(ring->vaddr + ring->tail, 0, remain_actual); 2244 ring->tail = 0; 2245 ring->space -= remain_actual; 2246 } 2247 2248 ring->space -= bytes; 2249 GEM_BUG_ON(ring->space < 0); 2250 return 0; 2251 } 2252 2253 /* Align the ring tail to a cacheline boundary */ 2254 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2255 { 2256 struct intel_ring *ring = req->ring; 2257 int num_dwords = 2258 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2259 int ret; 2260 2261 if (num_dwords == 0) 2262 return 0; 2263 2264 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2265 ret = intel_ring_begin(req, num_dwords); 2266 if (ret) 2267 return ret; 2268 2269 while (num_dwords--) 2270 intel_ring_emit(ring, MI_NOOP); 2271 2272 intel_ring_advance(ring); 2273 2274 return 0; 2275 } 2276 2277 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) 2278 { 2279 struct drm_i915_private *dev_priv = request->i915; 2280 2281 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2282 2283 /* Every tail move must follow the sequence below */ 2284 2285 /* Disable notification that the ring is IDLE. The GT 2286 * will then assume that it is busy and bring it out of rc6. 2287 */ 2288 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2289 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2290 2291 /* Clear the context id. Here be magic! */ 2292 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 2293 2294 /* Wait for the ring not to be idle, i.e. for it to wake up. 
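 *
 * What the intel_wait_for_register_fw() call below amounts to is a
 * bounded poll on GEN6_BSD_SLEEP_INDICATOR: keep reading until the
 * masked bits become zero or the timeout expires.  A rough,
 * illustrative model of such a poll (not the real uncore helper):
 *
 *	static int poll_until_clear(u32 (*read_reg)(void), u32 mask,
 *				    unsigned int timeout_ms)
 *	{
 *		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
 *
 *		do {
 *			if ((read_reg() & mask) == 0)
 *				return 0;	// awake again
 *			cpu_relax();
 *		} while (time_before(jiffies, deadline));
 *
 *		return -ETIMEDOUT;	// still asleep, caller warns
 *	}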
*/ 2295 if (intel_wait_for_register_fw(dev_priv, 2296 GEN6_BSD_SLEEP_PSMI_CONTROL, 2297 GEN6_BSD_SLEEP_INDICATOR, 2298 0, 2299 50)) 2300 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2301 2302 /* Now that the ring is fully powered up, update the tail */ 2303 i9xx_submit_request(request); 2304 2305 /* Let the ring send IDLE messages to the GT again, 2306 * and so let it sleep to conserve power when idle. 2307 */ 2308 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2309 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2310 2311 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2312 } 2313 2314 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 2315 { 2316 struct intel_ring *ring = req->ring; 2317 uint32_t cmd; 2318 int ret; 2319 2320 ret = intel_ring_begin(req, 4); 2321 if (ret) 2322 return ret; 2323 2324 cmd = MI_FLUSH_DW; 2325 if (INTEL_GEN(req->i915) >= 8) 2326 cmd += 1; 2327 2328 /* We always require a command barrier so that subsequent 2329 * commands, such as breadcrumb interrupts, are strictly ordered 2330 * wrt the contents of the write cache being flushed to memory 2331 * (and thus being coherent from the CPU). 2332 */ 2333 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2334 2335 /* 2336 * Bspec vol 1c.5 - video engine command streamer: 2337 * "If ENABLED, all TLBs will be invalidated once the flush 2338 * operation is complete. This bit is only valid when the 2339 * Post-Sync Operation field is a value of 1h or 3h." 2340 */ 2341 if (mode & EMIT_INVALIDATE) 2342 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2343 2344 intel_ring_emit(ring, cmd); 2345 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2346 if (INTEL_GEN(req->i915) >= 8) { 2347 intel_ring_emit(ring, 0); /* upper addr */ 2348 intel_ring_emit(ring, 0); /* value */ 2349 } else { 2350 intel_ring_emit(ring, 0); 2351 intel_ring_emit(ring, MI_NOOP); 2352 } 2353 intel_ring_advance(ring); 2354 return 0; 2355 } 2356 2357 static int 2358 gen8_emit_bb_start(struct drm_i915_gem_request *req, 2359 u64 offset, u32 len, 2360 unsigned int dispatch_flags) 2361 { 2362 struct intel_ring *ring = req->ring; 2363 bool ppgtt = USES_PPGTT(req->i915) && 2364 !(dispatch_flags & I915_DISPATCH_SECURE); 2365 int ret; 2366 2367 ret = intel_ring_begin(req, 4); 2368 if (ret) 2369 return ret; 2370 2371 /* FIXME(BDW): Address space and security selectors. */ 2372 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2373 (dispatch_flags & I915_DISPATCH_RS ? 2374 MI_BATCH_RESOURCE_STREAMER : 0)); 2375 intel_ring_emit(ring, lower_32_bits(offset)); 2376 intel_ring_emit(ring, upper_32_bits(offset)); 2377 intel_ring_emit(ring, MI_NOOP); 2378 intel_ring_advance(ring); 2379 2380 return 0; 2381 } 2382 2383 static int 2384 hsw_emit_bb_start(struct drm_i915_gem_request *req, 2385 u64 offset, u32 len, 2386 unsigned int dispatch_flags) 2387 { 2388 struct intel_ring *ring = req->ring; 2389 int ret; 2390 2391 ret = intel_ring_begin(req, 2); 2392 if (ret) 2393 return ret; 2394 2395 intel_ring_emit(ring, 2396 MI_BATCH_BUFFER_START | 2397 (dispatch_flags & I915_DISPATCH_SECURE ? 2398 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2399 (dispatch_flags & I915_DISPATCH_RS ? 
2400 MI_BATCH_RESOURCE_STREAMER : 0)); 2401 /* bit0-7 is the length on GEN6+ */ 2402 intel_ring_emit(ring, offset); 2403 intel_ring_advance(ring); 2404 2405 return 0; 2406 } 2407 2408 static int 2409 gen6_emit_bb_start(struct drm_i915_gem_request *req, 2410 u64 offset, u32 len, 2411 unsigned int dispatch_flags) 2412 { 2413 struct intel_ring *ring = req->ring; 2414 int ret; 2415 2416 ret = intel_ring_begin(req, 2); 2417 if (ret) 2418 return ret; 2419 2420 intel_ring_emit(ring, 2421 MI_BATCH_BUFFER_START | 2422 (dispatch_flags & I915_DISPATCH_SECURE ? 2423 0 : MI_BATCH_NON_SECURE_I965)); 2424 /* bit0-7 is the length on GEN6+ */ 2425 intel_ring_emit(ring, offset); 2426 intel_ring_advance(ring); 2427 2428 return 0; 2429 } 2430 2431 /* Blitter support (SandyBridge+) */ 2432 2433 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) 2434 { 2435 struct intel_ring *ring = req->ring; 2436 uint32_t cmd; 2437 int ret; 2438 2439 ret = intel_ring_begin(req, 4); 2440 if (ret) 2441 return ret; 2442 2443 cmd = MI_FLUSH_DW; 2444 if (INTEL_GEN(req->i915) >= 8) 2445 cmd += 1; 2446 2447 /* We always require a command barrier so that subsequent 2448 * commands, such as breadcrumb interrupts, are strictly ordered 2449 * wrt the contents of the write cache being flushed to memory 2450 * (and thus being coherent from the CPU). 2451 */ 2452 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2453 2454 /* 2455 * Bspec vol 1c.3 - blitter engine command streamer: 2456 * "If ENABLED, all TLBs will be invalidated once the flush 2457 * operation is complete. This bit is only valid when the 2458 * Post-Sync Operation field is a value of 1h or 3h." 2459 */ 2460 if (mode & EMIT_INVALIDATE) 2461 cmd |= MI_INVALIDATE_TLB; 2462 intel_ring_emit(ring, cmd); 2463 intel_ring_emit(ring, 2464 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2465 if (INTEL_GEN(req->i915) >= 8) { 2466 intel_ring_emit(ring, 0); /* upper addr */ 2467 intel_ring_emit(ring, 0); /* value */ 2468 } else { 2469 intel_ring_emit(ring, 0); 2470 intel_ring_emit(ring, MI_NOOP); 2471 } 2472 intel_ring_advance(ring); 2473 2474 return 0; 2475 } 2476 2477 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, 2478 struct intel_engine_cs *engine) 2479 { 2480 struct drm_i915_gem_object *obj; 2481 int ret, i; 2482 2483 if (!i915.semaphores) 2484 return; 2485 2486 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { 2487 struct i915_vma *vma; 2488 2489 obj = i915_gem_object_create(&dev_priv->drm, 4096); 2490 if (IS_ERR(obj)) 2491 goto err; 2492 2493 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); 2494 if (IS_ERR(vma)) 2495 goto err_obj; 2496 2497 ret = i915_gem_object_set_to_gtt_domain(obj, false); 2498 if (ret) 2499 goto err_obj; 2500 2501 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 2502 if (ret) 2503 goto err_obj; 2504 2505 dev_priv->semaphore = vma; 2506 } 2507 2508 if (INTEL_GEN(dev_priv) >= 8) { 2509 u32 offset = i915_ggtt_offset(dev_priv->semaphore); 2510 2511 engine->semaphore.sync_to = gen8_ring_sync_to; 2512 engine->semaphore.signal = gen8_xcs_signal; 2513 2514 for (i = 0; i < I915_NUM_ENGINES; i++) { 2515 u32 ring_offset; 2516 2517 if (i != engine->id) 2518 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); 2519 else 2520 ring_offset = MI_SEMAPHORE_SYNC_INVALID; 2521 2522 engine->semaphore.signal_ggtt[i] = ring_offset; 2523 } 2524 } else if (INTEL_GEN(dev_priv) >= 6) { 2525 engine->semaphore.sync_to = gen6_ring_sync_to; 2526 engine->semaphore.signal = gen6_signal; 2527 2528 /* 2529 * The 
current semaphore is only applied on pre-gen8 2530 * platform. And there is no VCS2 ring on the pre-gen8 2531 * platform. So the semaphore between RCS and VCS2 is 2532 * initialized as INVALID. Gen8 will initialize the 2533 * sema between VCS2 and RCS later. 2534 */ 2535 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { 2536 static const struct { 2537 u32 wait_mbox; 2538 i915_reg_t mbox_reg; 2539 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { 2540 [RCS_HW] = { 2541 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, 2542 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, 2543 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, 2544 }, 2545 [VCS_HW] = { 2546 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, 2547 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, 2548 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, 2549 }, 2550 [BCS_HW] = { 2551 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, 2552 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, 2553 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, 2554 }, 2555 [VECS_HW] = { 2556 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, 2557 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, 2558 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, 2559 }, 2560 }; 2561 u32 wait_mbox; 2562 i915_reg_t mbox_reg; 2563 2564 if (i == engine->hw_id) { 2565 wait_mbox = MI_SEMAPHORE_SYNC_INVALID; 2566 mbox_reg = GEN6_NOSYNC; 2567 } else { 2568 wait_mbox = sem_data[engine->hw_id][i].wait_mbox; 2569 mbox_reg = sem_data[engine->hw_id][i].mbox_reg; 2570 } 2571 2572 engine->semaphore.mbox.wait[i] = wait_mbox; 2573 engine->semaphore.mbox.signal[i] = mbox_reg; 2574 } 2575 } 2576 2577 return; 2578 2579 err_obj: 2580 i915_gem_object_put(obj); 2581 err: 2582 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); 2583 i915.semaphores = 0; 2584 } 2585 2586 static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2587 struct intel_engine_cs *engine) 2588 { 2589 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift; 2590 2591 if (INTEL_GEN(dev_priv) >= 8) { 2592 engine->irq_enable = gen8_irq_enable; 2593 engine->irq_disable = gen8_irq_disable; 2594 engine->irq_seqno_barrier = gen6_seqno_barrier; 2595 } else if (INTEL_GEN(dev_priv) >= 6) { 2596 engine->irq_enable = gen6_irq_enable; 2597 engine->irq_disable = gen6_irq_disable; 2598 engine->irq_seqno_barrier = gen6_seqno_barrier; 2599 } else if (INTEL_GEN(dev_priv) >= 5) { 2600 engine->irq_enable = gen5_irq_enable; 2601 engine->irq_disable = gen5_irq_disable; 2602 engine->irq_seqno_barrier = gen5_seqno_barrier; 2603 } else if (INTEL_GEN(dev_priv) >= 3) { 2604 engine->irq_enable = i9xx_irq_enable; 2605 engine->irq_disable = i9xx_irq_disable; 2606 } else { 2607 engine->irq_enable = i8xx_irq_enable; 2608 engine->irq_disable = i8xx_irq_disable; 2609 } 2610 } 2611 2612 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2613 struct intel_engine_cs *engine) 2614 { 2615 intel_ring_init_irq(dev_priv, engine); 2616 intel_ring_init_semaphores(dev_priv, engine); 2617 2618 engine->init_hw = init_ring_common; 2619 engine->reset_hw = reset_ring_common; 2620 2621 engine->emit_request = i9xx_emit_request; 2622 if (i915.semaphores) 2623 engine->emit_request = 
gen6_sema_emit_request; 2624 engine->submit_request = i9xx_submit_request; 2625 2626 if (INTEL_GEN(dev_priv) >= 8) 2627 engine->emit_bb_start = gen8_emit_bb_start; 2628 else if (INTEL_GEN(dev_priv) >= 6) 2629 engine->emit_bb_start = gen6_emit_bb_start; 2630 else if (INTEL_GEN(dev_priv) >= 4) 2631 engine->emit_bb_start = i965_emit_bb_start; 2632 else if (IS_I830(dev_priv) || IS_845G(dev_priv)) 2633 engine->emit_bb_start = i830_emit_bb_start; 2634 else 2635 engine->emit_bb_start = i915_emit_bb_start; 2636 } 2637 2638 int intel_init_render_ring_buffer(struct intel_engine_cs *engine) 2639 { 2640 struct drm_i915_private *dev_priv = engine->i915; 2641 int ret; 2642 2643 intel_ring_default_vfuncs(dev_priv, engine); 2644 2645 if (HAS_L3_DPF(dev_priv)) 2646 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2647 2648 if (INTEL_GEN(dev_priv) >= 8) { 2649 engine->init_context = intel_rcs_ctx_init; 2650 engine->emit_request = gen8_render_emit_request; 2651 engine->emit_flush = gen8_render_ring_flush; 2652 if (i915.semaphores) 2653 engine->semaphore.signal = gen8_rcs_signal; 2654 } else if (INTEL_GEN(dev_priv) >= 6) { 2655 engine->init_context = intel_rcs_ctx_init; 2656 engine->emit_flush = gen7_render_ring_flush; 2657 if (IS_GEN6(dev_priv)) 2658 engine->emit_flush = gen6_render_ring_flush; 2659 } else if (IS_GEN5(dev_priv)) { 2660 engine->emit_flush = gen4_render_ring_flush; 2661 } else { 2662 if (INTEL_GEN(dev_priv) < 4) 2663 engine->emit_flush = gen2_render_ring_flush; 2664 else 2665 engine->emit_flush = gen4_render_ring_flush; 2666 engine->irq_enable_mask = I915_USER_INTERRUPT; 2667 } 2668 2669 if (IS_HASWELL(dev_priv)) 2670 engine->emit_bb_start = hsw_emit_bb_start; 2671 2672 engine->init_hw = init_render_ring; 2673 engine->cleanup = render_ring_cleanup; 2674 2675 ret = intel_init_ring_buffer(engine); 2676 if (ret) 2677 return ret; 2678 2679 if (INTEL_GEN(dev_priv) >= 6) { 2680 ret = intel_engine_create_scratch(engine, 4096); 2681 if (ret) 2682 return ret; 2683 } else if (HAS_BROKEN_CS_TLB(dev_priv)) { 2684 ret = intel_engine_create_scratch(engine, I830_WA_SIZE); 2685 if (ret) 2686 return ret; 2687 } 2688 2689 return 0; 2690 } 2691 2692 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) 2693 { 2694 struct drm_i915_private *dev_priv = engine->i915; 2695 2696 intel_ring_default_vfuncs(dev_priv, engine); 2697 2698 if (INTEL_GEN(dev_priv) >= 6) { 2699 /* gen6 bsd needs a special wa for tail updates */ 2700 if (IS_GEN6(dev_priv)) 2701 engine->submit_request = gen6_bsd_submit_request; 2702 engine->emit_flush = gen6_bsd_ring_flush; 2703 if (INTEL_GEN(dev_priv) < 8) 2704 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2705 } else { 2706 engine->mmio_base = BSD_RING_BASE; 2707 engine->emit_flush = bsd_ring_flush; 2708 if (IS_GEN5(dev_priv)) 2709 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2710 else 2711 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2712 } 2713 2714 return intel_init_ring_buffer(engine); 2715 } 2716 2717 /** 2718 * Initialize the second BSD ring (e.g.
Broadwell GT3, Skylake GT3) 2719 */ 2720 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine) 2721 { 2722 struct drm_i915_private *dev_priv = engine->i915; 2723 2724 intel_ring_default_vfuncs(dev_priv, engine); 2725 2726 engine->emit_flush = gen6_bsd_ring_flush; 2727 2728 return intel_init_ring_buffer(engine); 2729 } 2730 2731 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) 2732 { 2733 struct drm_i915_private *dev_priv = engine->i915; 2734 2735 intel_ring_default_vfuncs(dev_priv, engine); 2736 2737 engine->emit_flush = gen6_ring_flush; 2738 if (INTEL_GEN(dev_priv) < 8) 2739 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2740 2741 return intel_init_ring_buffer(engine); 2742 } 2743 2744 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) 2745 { 2746 struct drm_i915_private *dev_priv = engine->i915; 2747 2748 intel_ring_default_vfuncs(dev_priv, engine); 2749 2750 engine->emit_flush = gen6_ring_flush; 2751 2752 if (INTEL_GEN(dev_priv) < 8) { 2753 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2754 engine->irq_enable = hsw_vebox_irq_enable; 2755 engine->irq_disable = hsw_vebox_irq_disable; 2756 } 2757 2758 return intel_init_ring_buffer(engine); 2759 } 2760
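
/*
 * A worked example of the wrap handling in intel_ring_begin() above,
 * with assumed numbers (a hypothetical 16 KiB ring on i830, so
 * effective_size = 16256; reserved_space = 200; tail = 16200):
 *
 *	remain_actual = 16384 - 16200 = 184
 *	remain_usable = 16256 - 16200 = 56
 *
 * A request for 20 dwords is bytes = 80 > remain_usable, so need_wrap
 * is set and wait_bytes = remain_actual + bytes + reserved_space =
 * 184 + 80 + 200 = 464.  Once that much space is free, the 184-byte
 * remainder is filled with MI_NOOPs, TAIL wraps to 0 and the dwords
 * are emitted from the start of the buffer.  Had bytes fitted but
 * bytes + reserved_space not, no immediate wrap would be needed and
 * only remain_actual + reserved_space would have to become free.
 *
 * Similarly, a concrete reading of the sem_data[][] table in
 * intel_ring_init_semaphores(): for the render engine (RCS_HW)
 * waiting on the video engine (VCS_HW), the pre-gen8 path selects
 * wait_mbox = MI_SEMAPHORE_SYNC_RV and mbox_reg = GEN6_VRSYNC, while
 * the diagonal entries (an engine waiting on itself) stay
 * MI_SEMAPHORE_SYNC_INVALID / GEN6_NOSYNC.
 */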