1 /* 2 * Copyright © 2008-2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Zou Nan hai <nanhai.zou@intel.com> 26 * Xiang Hai hao<haihao.xiang@intel.com> 27 * 28 */ 29 30 #include <linux/log2.h> 31 #include <drm/drmP.h> 32 #include "i915_drv.h" 33 #include <drm/i915_drm.h> 34 #include "i915_trace.h" 35 #include "intel_drv.h" 36 37 /* Rough estimate of the typical request size, performing a flush, 38 * set-context and then emitting the batch. 39 */ 40 #define LEGACY_REQUEST_SIZE 200 41 42 static unsigned int __intel_ring_space(unsigned int head, 43 unsigned int tail, 44 unsigned int size) 45 { 46 /* 47 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the 48 * same cacheline, the Head Pointer must not be greater than the Tail 49 * Pointer." 50 */ 51 GEM_BUG_ON(!is_power_of_2(size)); 52 return (head - tail - CACHELINE_BYTES) & (size - 1); 53 } 54 55 unsigned int intel_ring_update_space(struct intel_ring *ring) 56 { 57 unsigned int space; 58 59 space = __intel_ring_space(ring->head, ring->emit, ring->size); 60 61 ring->space = space; 62 return space; 63 } 64 65 static int 66 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 67 { 68 u32 cmd, *cs; 69 70 cmd = MI_FLUSH; 71 72 if (mode & EMIT_INVALIDATE) 73 cmd |= MI_READ_FLUSH; 74 75 cs = intel_ring_begin(req, 2); 76 if (IS_ERR(cs)) 77 return PTR_ERR(cs); 78 79 *cs++ = cmd; 80 *cs++ = MI_NOOP; 81 intel_ring_advance(req, cs); 82 83 return 0; 84 } 85 86 static int 87 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 88 { 89 u32 cmd, *cs; 90 91 /* 92 * read/write caches: 93 * 94 * I915_GEM_DOMAIN_RENDER is always invalidated, but is 95 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 96 * also flushed at 2d versus 3d pipeline switches. 97 * 98 * read-only caches: 99 * 100 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 101 * MI_READ_FLUSH is set, and is always flushed on 965. 102 * 103 * I915_GEM_DOMAIN_COMMAND may not exist? 104 * 105 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 106 * invalidated when MI_EXE_FLUSH is set. 107 * 108 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 109 * invalidated with every MI_FLUSH. 110 * 111 * TLBs: 112 * 113 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 114 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 115 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 116 * are flushed at any MI_FLUSH. 117 */ 118 119 cmd = MI_FLUSH; 120 if (mode & EMIT_INVALIDATE) { 121 cmd |= MI_EXE_FLUSH; 122 if (IS_G4X(req->i915) || IS_GEN5(req->i915)) 123 cmd |= MI_INVALIDATE_ISP; 124 } 125 126 cs = intel_ring_begin(req, 2); 127 if (IS_ERR(cs)) 128 return PTR_ERR(cs); 129 130 *cs++ = cmd; 131 *cs++ = MI_NOOP; 132 intel_ring_advance(req, cs); 133 134 return 0; 135 } 136 137 /** 138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 139 * implementing two workarounds on gen6. From section 1.4.7.1 140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: 141 * 142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those 143 * produced by non-pipelined state commands), software needs to first 144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 145 * 0. 146 * 147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable 148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. 149 * 150 * And the workaround for these two requires this workaround first: 151 * 152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent 153 * BEFORE the pipe-control with a post-sync op and no write-cache 154 * flushes. 155 * 156 * And this last workaround is tricky because of the requirements on 157 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM 158 * volume 2 part 1: 159 * 160 * "1 of the following must also be set: 161 * - Render Target Cache Flush Enable ([12] of DW1) 162 * - Depth Cache Flush Enable ([0] of DW1) 163 * - Stall at Pixel Scoreboard ([1] of DW1) 164 * - Depth Stall ([13] of DW1) 165 * - Post-Sync Operation ([13] of DW1) 166 * - Notify Enable ([8] of DW1)" 167 * 168 * The cache flushes require the workaround flush that triggered this 169 * one, so we can't use it. Depth stall would trigger the same. 170 * Post-sync nonzero is what triggered this second workaround, so we 171 * can't use that one either. Notify enable is IRQs, which aren't 172 * really our business. That leaves only stall at scoreboard. 173 */ 174 static int 175 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) 176 { 177 u32 scratch_addr = 178 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 179 u32 *cs; 180 181 cs = intel_ring_begin(req, 6); 182 if (IS_ERR(cs)) 183 return PTR_ERR(cs); 184 185 *cs++ = GFX_OP_PIPE_CONTROL(5); 186 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 187 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 188 *cs++ = 0; /* low dword */ 189 *cs++ = 0; /* high dword */ 190 *cs++ = MI_NOOP; 191 intel_ring_advance(req, cs); 192 193 cs = intel_ring_begin(req, 6); 194 if (IS_ERR(cs)) 195 return PTR_ERR(cs); 196 197 *cs++ = GFX_OP_PIPE_CONTROL(5); 198 *cs++ = PIPE_CONTROL_QW_WRITE; 199 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 200 *cs++ = 0; 201 *cs++ = 0; 202 *cs++ = MI_NOOP; 203 intel_ring_advance(req, cs); 204 205 return 0; 206 } 207 208 static int 209 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 210 { 211 u32 scratch_addr = 212 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 213 u32 *cs, flags = 0; 214 int ret; 215 216 /* Force SNB workarounds for PIPE_CONTROL flushes */ 217 ret = intel_emit_post_sync_nonzero_flush(req); 218 if (ret) 219 return ret; 220 221 /* Just flush everything. Experiments have shown that reducing the 222 * number of bits based on the write domains has little performance 223 * impact. 224 */ 225 if (mode & EMIT_FLUSH) { 226 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 227 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 228 /* 229 * Ensure that any following seqno writes only happen 230 * when the render cache is indeed flushed. 231 */ 232 flags |= PIPE_CONTROL_CS_STALL; 233 } 234 if (mode & EMIT_INVALIDATE) { 235 flags |= PIPE_CONTROL_TLB_INVALIDATE; 236 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 237 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 238 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 239 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 240 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 241 /* 242 * TLB invalidate requires a post-sync write. 243 */ 244 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 245 } 246 247 cs = intel_ring_begin(req, 4); 248 if (IS_ERR(cs)) 249 return PTR_ERR(cs); 250 251 *cs++ = GFX_OP_PIPE_CONTROL(4); 252 *cs++ = flags; 253 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 254 *cs++ = 0; 255 intel_ring_advance(req, cs); 256 257 return 0; 258 } 259 260 static int 261 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) 262 { 263 u32 *cs; 264 265 cs = intel_ring_begin(req, 4); 266 if (IS_ERR(cs)) 267 return PTR_ERR(cs); 268 269 *cs++ = GFX_OP_PIPE_CONTROL(4); 270 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 271 *cs++ = 0; 272 *cs++ = 0; 273 intel_ring_advance(req, cs); 274 275 return 0; 276 } 277 278 static int 279 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 280 { 281 u32 scratch_addr = 282 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 283 u32 *cs, flags = 0; 284 285 /* 286 * Ensure that any following seqno writes only happen when the render 287 * cache is indeed flushed. 288 * 289 * Workaround: 4th PIPE_CONTROL command (except the ones with only 290 * read-cache invalidate bits set) must have the CS_STALL bit set. We 291 * don't try to be clever and just set it unconditionally. 292 */ 293 flags |= PIPE_CONTROL_CS_STALL; 294 295 /* Just flush everything. Experiments have shown that reducing the 296 * number of bits based on the write domains has little performance 297 * impact. 298 */ 299 if (mode & EMIT_FLUSH) { 300 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 301 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 302 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 303 flags |= PIPE_CONTROL_FLUSH_ENABLE; 304 } 305 if (mode & EMIT_INVALIDATE) { 306 flags |= PIPE_CONTROL_TLB_INVALIDATE; 307 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 308 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 309 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 310 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 311 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 312 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; 313 /* 314 * TLB invalidate requires a post-sync write. 315 */ 316 flags |= PIPE_CONTROL_QW_WRITE; 317 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 318 319 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; 320 321 /* Workaround: we must issue a pipe_control with CS-stall bit 322 * set before a pipe_control command that has the state cache 323 * invalidate bit set. */ 324 gen7_render_ring_cs_stall_wa(req); 325 } 326 327 cs = intel_ring_begin(req, 4); 328 if (IS_ERR(cs)) 329 return PTR_ERR(cs); 330 331 *cs++ = GFX_OP_PIPE_CONTROL(4); 332 *cs++ = flags; 333 *cs++ = scratch_addr; 334 *cs++ = 0; 335 intel_ring_advance(req, cs); 336 337 return 0; 338 } 339 340 static int 341 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 342 { 343 u32 flags; 344 u32 *cs; 345 346 cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6); 347 if (IS_ERR(cs)) 348 return PTR_ERR(cs); 349 350 flags = PIPE_CONTROL_CS_STALL; 351 352 if (mode & EMIT_FLUSH) { 353 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 354 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 355 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 356 flags |= PIPE_CONTROL_FLUSH_ENABLE; 357 } 358 if (mode & EMIT_INVALIDATE) { 359 flags |= PIPE_CONTROL_TLB_INVALIDATE; 360 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 361 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 365 flags |= PIPE_CONTROL_QW_WRITE; 366 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 367 368 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ 369 cs = gen8_emit_pipe_control(cs, 370 PIPE_CONTROL_CS_STALL | 371 PIPE_CONTROL_STALL_AT_SCOREBOARD, 372 0); 373 } 374 375 cs = gen8_emit_pipe_control(cs, flags, 376 i915_ggtt_offset(req->engine->scratch) + 377 2 * CACHELINE_BYTES); 378 379 intel_ring_advance(req, cs); 380 381 return 0; 382 } 383 384 static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 385 { 386 struct drm_i915_private *dev_priv = engine->i915; 387 u32 addr; 388 389 addr = dev_priv->status_page_dmah->busaddr; 390 if (INTEL_GEN(dev_priv) >= 4) 391 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 392 I915_WRITE(HWS_PGA, addr); 393 } 394 395 static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 396 { 397 struct drm_i915_private *dev_priv = engine->i915; 398 i915_reg_t mmio; 399 400 /* The ring status page addresses are no longer next to the rest of 401 * the ring registers as of gen7. 402 */ 403 if (IS_GEN7(dev_priv)) { 404 switch (engine->id) { 405 /* 406 * No more rings exist on Gen7. Default case is only to shut up 407 * gcc switch check warning. 408 */ 409 default: 410 GEM_BUG_ON(engine->id); 411 case RCS: 412 mmio = RENDER_HWS_PGA_GEN7; 413 break; 414 case BCS: 415 mmio = BLT_HWS_PGA_GEN7; 416 break; 417 case VCS: 418 mmio = BSD_HWS_PGA_GEN7; 419 break; 420 case VECS: 421 mmio = VEBOX_HWS_PGA_GEN7; 422 break; 423 } 424 } else if (IS_GEN6(dev_priv)) { 425 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 426 } else { 427 /* XXX: gen8 returns to sanity */ 428 mmio = RING_HWS_PGA(engine->mmio_base); 429 } 430 431 if (INTEL_GEN(dev_priv) >= 6) 432 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); 433 434 I915_WRITE(mmio, engine->status_page.ggtt_offset); 435 POSTING_READ(mmio); 436 437 /* 438 * Flush the TLB for this page 439 * 440 * FIXME: These two bits have disappeared on gen8, so a question 441 * arises: do we still need this and if so how should we go about 442 * invalidating the TLB? 443 */ 444 if (IS_GEN(dev_priv, 6, 7)) { 445 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 446 447 /* ring should be idle before issuing a sync flush*/ 448 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); 449 450 I915_WRITE(reg, 451 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 452 INSTPM_SYNC_FLUSH)); 453 if (intel_wait_for_register(dev_priv, 454 reg, INSTPM_SYNC_FLUSH, 0, 455 1000)) 456 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 457 engine->name); 458 } 459 } 460 461 static bool stop_ring(struct intel_engine_cs *engine) 462 { 463 struct drm_i915_private *dev_priv = engine->i915; 464 465 if (INTEL_GEN(dev_priv) > 2) { 466 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 467 if (intel_wait_for_register(dev_priv, 468 RING_MI_MODE(engine->mmio_base), 469 MODE_IDLE, 470 MODE_IDLE, 471 1000)) { 472 DRM_ERROR("%s : timed out trying to stop ring\n", 473 engine->name); 474 /* Sometimes we observe that the idle flag is not 475 * set even though the ring is empty. So double 476 * check before giving up. 477 */ 478 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine)) 479 return false; 480 } 481 } 482 483 I915_WRITE_CTL(engine, 0); 484 I915_WRITE_HEAD(engine, 0); 485 I915_WRITE_TAIL(engine, 0); 486 487 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; 488 } 489 490 static int init_ring_common(struct intel_engine_cs *engine) 491 { 492 struct drm_i915_private *dev_priv = engine->i915; 493 struct intel_ring *ring = engine->buffer; 494 int ret = 0; 495 496 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 497 498 if (!stop_ring(engine)) { 499 /* G45 ring initialization often fails to reset head to zero */ 500 DRM_DEBUG_KMS("%s head not reset to zero " 501 "ctl %08x head %08x tail %08x start %08x\n", 502 engine->name, 503 I915_READ_CTL(engine), 504 I915_READ_HEAD(engine), 505 I915_READ_TAIL(engine), 506 I915_READ_START(engine)); 507 508 if (!stop_ring(engine)) { 509 DRM_ERROR("failed to set %s head to zero " 510 "ctl %08x head %08x tail %08x start %08x\n", 511 engine->name, 512 I915_READ_CTL(engine), 513 I915_READ_HEAD(engine), 514 I915_READ_TAIL(engine), 515 I915_READ_START(engine)); 516 ret = -EIO; 517 goto out; 518 } 519 } 520 521 if (HWS_NEEDS_PHYSICAL(dev_priv)) 522 ring_setup_phys_status_page(engine); 523 else 524 intel_ring_setup_status_page(engine); 525 526 intel_engine_reset_breadcrumbs(engine); 527 528 /* Enforce ordering by reading HEAD register back */ 529 I915_READ_HEAD(engine); 530 531 /* Initialize the ring. This must happen _after_ we've cleared the ring 532 * registers with the above sequence (the readback of the HEAD registers 533 * also enforces ordering), otherwise the hw might lose the new ring 534 * register values. */ 535 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); 536 537 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 538 if (I915_READ_HEAD(engine)) 539 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n", 540 engine->name, I915_READ_HEAD(engine)); 541 542 intel_ring_update_space(ring); 543 I915_WRITE_HEAD(engine, ring->head); 544 I915_WRITE_TAIL(engine, ring->tail); 545 (void)I915_READ_TAIL(engine); 546 547 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); 548 549 /* If the head is still not zero, the ring is dead */ 550 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base), 551 RING_VALID, RING_VALID, 552 50)) { 553 DRM_ERROR("%s initialization failed " 554 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", 555 engine->name, 556 I915_READ_CTL(engine), 557 I915_READ_CTL(engine) & RING_VALID, 558 I915_READ_HEAD(engine), ring->head, 559 I915_READ_TAIL(engine), ring->tail, 560 I915_READ_START(engine), 561 i915_ggtt_offset(ring->vma)); 562 ret = -EIO; 563 goto out; 564 } 565 566 intel_engine_init_hangcheck(engine); 567 568 if (INTEL_GEN(dev_priv) > 2) 569 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 570 571 out: 572 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 573 574 return ret; 575 } 576 577 static void reset_ring_common(struct intel_engine_cs *engine, 578 struct drm_i915_gem_request *request) 579 { 580 /* 581 * RC6 must be prevented until the reset is complete and the engine 582 * reinitialised. If it occurs in the middle of this sequence, the 583 * state written to/loaded from the power context is ill-defined (e.g. 584 * the PP_BASE_DIR may be lost). 585 */ 586 assert_forcewakes_active(engine->i915, FORCEWAKE_ALL); 587 588 /* 589 * Try to restore the logical GPU state to match the continuation 590 * of the request queue. If we skip the context/PD restore, then 591 * the next request may try to execute assuming that its context 592 * is valid and loaded on the GPU and so may try to access invalid 593 * memory, prompting repeated GPU hangs. 594 * 595 * If the request was guilty, we still restore the logical state 596 * in case the next request requires it (e.g. the aliasing ppgtt), 597 * but skip over the hung batch. 598 * 599 * If the request was innocent, we try to replay the request with 600 * the restored context. 601 */ 602 if (request) { 603 struct drm_i915_private *dev_priv = request->i915; 604 struct intel_context *ce = &request->ctx->engine[engine->id]; 605 struct i915_hw_ppgtt *ppgtt; 606 607 /* FIXME consider gen8 reset */ 608 609 if (ce->state) { 610 I915_WRITE(CCID, 611 i915_ggtt_offset(ce->state) | 612 BIT(8) /* must be set! */ | 613 CCID_EXTENDED_STATE_SAVE | 614 CCID_EXTENDED_STATE_RESTORE | 615 CCID_EN); 616 } 617 618 ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; 619 if (ppgtt) { 620 u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; 621 622 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 623 I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); 624 625 /* Wait for the PD reload to complete */ 626 if (intel_wait_for_register(dev_priv, 627 RING_PP_DIR_BASE(engine), 628 BIT(0), 0, 629 10)) 630 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); 631 632 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); 633 } 634 635 /* If the rq hung, jump to its breadcrumb and skip the batch */ 636 if (request->fence.error == -EIO) 637 request->ring->head = request->postfix; 638 } else { 639 engine->legacy_active_context = NULL; 640 } 641 } 642 643 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) 644 { 645 int ret; 646 647 ret = intel_ring_workarounds_emit(req); 648 if (ret != 0) 649 return ret; 650 651 ret = i915_gem_render_state_emit(req); 652 if (ret) 653 return ret; 654 655 return 0; 656 } 657 658 static int init_render_ring(struct intel_engine_cs *engine) 659 { 660 struct drm_i915_private *dev_priv = engine->i915; 661 int ret = init_ring_common(engine); 662 if (ret) 663 return ret; 664 665 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 666 if (IS_GEN(dev_priv, 4, 6)) 667 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 668 669 /* We need to disable the AsyncFlip performance optimisations in order 670 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 671 * programmed to '1' on all products. 672 * 673 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 674 */ 675 if (IS_GEN(dev_priv, 6, 7)) 676 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 677 678 /* Required for the hardware to program scanline values for waiting */ 679 /* WaEnableFlushTlbInvalidationMode:snb */ 680 if (IS_GEN6(dev_priv)) 681 I915_WRITE(GFX_MODE, 682 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 683 684 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 685 if (IS_GEN7(dev_priv)) 686 I915_WRITE(GFX_MODE_GEN7, 687 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 688 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 689 690 if (IS_GEN6(dev_priv)) { 691 /* From the Sandybridge PRM, volume 1 part 3, page 24: 692 * "If this bit is set, STCunit will have LRA as replacement 693 * policy. [...] This bit must be reset. LRA replacement 694 * policy is not supported." 695 */ 696 I915_WRITE(CACHE_MODE_0, 697 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 698 } 699 700 if (IS_GEN(dev_priv, 6, 7)) 701 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 702 703 if (INTEL_INFO(dev_priv)->gen >= 6) 704 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 705 706 return init_workarounds_ring(engine); 707 } 708 709 static void render_ring_cleanup(struct intel_engine_cs *engine) 710 { 711 struct drm_i915_private *dev_priv = engine->i915; 712 713 i915_vma_unpin_and_release(&dev_priv->semaphore); 714 } 715 716 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs) 717 { 718 struct drm_i915_private *dev_priv = req->i915; 719 struct intel_engine_cs *waiter; 720 enum intel_engine_id id; 721 722 for_each_engine(waiter, dev_priv, id) { 723 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; 724 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 725 continue; 726 727 *cs++ = GFX_OP_PIPE_CONTROL(6); 728 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | 729 PIPE_CONTROL_CS_STALL; 730 *cs++ = lower_32_bits(gtt_offset); 731 *cs++ = upper_32_bits(gtt_offset); 732 *cs++ = req->global_seqno; 733 *cs++ = 0; 734 *cs++ = MI_SEMAPHORE_SIGNAL | 735 MI_SEMAPHORE_TARGET(waiter->hw_id); 736 *cs++ = 0; 737 } 738 739 return cs; 740 } 741 742 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs) 743 { 744 struct drm_i915_private *dev_priv = req->i915; 745 struct intel_engine_cs *waiter; 746 enum intel_engine_id id; 747 748 for_each_engine(waiter, dev_priv, id) { 749 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; 750 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 751 continue; 752 753 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW; 754 *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT; 755 *cs++ = upper_32_bits(gtt_offset); 756 *cs++ = req->global_seqno; 757 *cs++ = MI_SEMAPHORE_SIGNAL | 758 MI_SEMAPHORE_TARGET(waiter->hw_id); 759 *cs++ = 0; 760 } 761 762 return cs; 763 } 764 765 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) 766 { 767 struct drm_i915_private *dev_priv = req->i915; 768 struct intel_engine_cs *engine; 769 enum intel_engine_id id; 770 int num_rings = 0; 771 772 for_each_engine(engine, dev_priv, id) { 773 i915_reg_t mbox_reg; 774 775 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) 776 continue; 777 778 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; 779 if (i915_mmio_reg_valid(mbox_reg)) { 780 *cs++ = MI_LOAD_REGISTER_IMM(1); 781 *cs++ = i915_mmio_reg_offset(mbox_reg); 782 *cs++ = req->global_seqno; 783 num_rings++; 784 } 785 } 786 if (num_rings & 1) 787 *cs++ = MI_NOOP; 788 789 return cs; 790 } 791 792 static void cancel_requests(struct intel_engine_cs *engine) 793 { 794 struct drm_i915_gem_request *request; 795 unsigned long flags; 796 797 spin_lock_irqsave(&engine->timeline->lock, flags); 798 799 /* Mark all submitted requests as skipped. */ 800 list_for_each_entry(request, &engine->timeline->requests, link) { 801 GEM_BUG_ON(!request->global_seqno); 802 if (!i915_gem_request_completed(request)) 803 dma_fence_set_error(&request->fence, -EIO); 804 } 805 /* Remaining _unready_ requests will be nop'ed when submitted */ 806 807 spin_unlock_irqrestore(&engine->timeline->lock, flags); 808 } 809 810 static void i9xx_submit_request(struct drm_i915_gem_request *request) 811 { 812 struct drm_i915_private *dev_priv = request->i915; 813 814 i915_gem_request_submit(request); 815 816 I915_WRITE_TAIL(request->engine, 817 intel_ring_set_tail(request->ring, request->tail)); 818 } 819 820 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 821 { 822 *cs++ = MI_STORE_DWORD_INDEX; 823 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT; 824 *cs++ = req->global_seqno; 825 *cs++ = MI_USER_INTERRUPT; 826 827 req->tail = intel_ring_offset(req, cs); 828 assert_ring_tail_valid(req->ring, req->tail); 829 } 830 831 static const int i9xx_emit_breadcrumb_sz = 4; 832 833 /** 834 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers 835 * 836 * @request - request to write to the ring 837 * 838 * Update the mailbox registers in the *other* rings with the current seqno. 839 * This acts like a signal in the canonical semaphore. 840 */ 841 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 842 { 843 return i9xx_emit_breadcrumb(req, 844 req->engine->semaphore.signal(req, cs)); 845 } 846 847 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req, 848 u32 *cs) 849 { 850 struct intel_engine_cs *engine = req->engine; 851 852 if (engine->semaphore.signal) 853 cs = engine->semaphore.signal(req, cs); 854 855 *cs++ = GFX_OP_PIPE_CONTROL(6); 856 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | 857 PIPE_CONTROL_QW_WRITE; 858 *cs++ = intel_hws_seqno_address(engine); 859 *cs++ = 0; 860 *cs++ = req->global_seqno; 861 /* We're thrashing one dword of HWS. */ 862 *cs++ = 0; 863 *cs++ = MI_USER_INTERRUPT; 864 *cs++ = MI_NOOP; 865 866 req->tail = intel_ring_offset(req, cs); 867 assert_ring_tail_valid(req->ring, req->tail); 868 } 869 870 static const int gen8_render_emit_breadcrumb_sz = 8; 871 872 /** 873 * intel_ring_sync - sync the waiter to the signaller on seqno 874 * 875 * @waiter - ring that is waiting 876 * @signaller - ring which has, or will signal 877 * @seqno - seqno which the waiter will block on 878 */ 879 880 static int 881 gen8_ring_sync_to(struct drm_i915_gem_request *req, 882 struct drm_i915_gem_request *signal) 883 { 884 struct drm_i915_private *dev_priv = req->i915; 885 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id); 886 struct i915_hw_ppgtt *ppgtt; 887 u32 *cs; 888 889 cs = intel_ring_begin(req, 4); 890 if (IS_ERR(cs)) 891 return PTR_ERR(cs); 892 893 *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | 894 MI_SEMAPHORE_SAD_GTE_SDD; 895 *cs++ = signal->global_seqno; 896 *cs++ = lower_32_bits(offset); 897 *cs++ = upper_32_bits(offset); 898 intel_ring_advance(req, cs); 899 900 /* When the !RCS engines idle waiting upon a semaphore, they lose their 901 * pagetables and we must reload them before executing the batch. 902 * We do this on the i915_switch_context() following the wait and 903 * before the dispatch. 904 */ 905 ppgtt = req->ctx->ppgtt; 906 if (ppgtt && req->engine->id != RCS) 907 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine); 908 return 0; 909 } 910 911 static int 912 gen6_ring_sync_to(struct drm_i915_gem_request *req, 913 struct drm_i915_gem_request *signal) 914 { 915 u32 dw1 = MI_SEMAPHORE_MBOX | 916 MI_SEMAPHORE_COMPARE | 917 MI_SEMAPHORE_REGISTER; 918 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; 919 u32 *cs; 920 921 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 922 923 cs = intel_ring_begin(req, 4); 924 if (IS_ERR(cs)) 925 return PTR_ERR(cs); 926 927 *cs++ = dw1 | wait_mbox; 928 /* Throughout all of the GEM code, seqno passed implies our current 929 * seqno is >= the last seqno executed. However for hardware the 930 * comparison is strictly greater than. 931 */ 932 *cs++ = signal->global_seqno - 1; 933 *cs++ = 0; 934 *cs++ = MI_NOOP; 935 intel_ring_advance(req, cs); 936 937 return 0; 938 } 939 940 static void 941 gen5_seqno_barrier(struct intel_engine_cs *engine) 942 { 943 /* MI_STORE are internally buffered by the GPU and not flushed 944 * either by MI_FLUSH or SyncFlush or any other combination of 945 * MI commands. 946 * 947 * "Only the submission of the store operation is guaranteed. 948 * The write result will be complete (coherent) some time later 949 * (this is practically a finite period but there is no guaranteed 950 * latency)." 951 * 952 * Empirically, we observe that we need a delay of at least 75us to 953 * be sure that the seqno write is visible by the CPU. 954 */ 955 usleep_range(125, 250); 956 } 957 958 static void 959 gen6_seqno_barrier(struct intel_engine_cs *engine) 960 { 961 struct drm_i915_private *dev_priv = engine->i915; 962 963 /* Workaround to force correct ordering between irq and seqno writes on 964 * ivb (and maybe also on snb) by reading from a CS register (like 965 * ACTHD) before reading the status page. 966 * 967 * Note that this effectively stalls the read by the time it takes to 968 * do a memory transaction, which more or less ensures that the write 969 * from the GPU has sufficient time to invalidate the CPU cacheline. 970 * Alternatively we could delay the interrupt from the CS ring to give 971 * the write time to land, but that would incur a delay after every 972 * batch i.e. much more frequent than a delay when waiting for the 973 * interrupt (with the same net latency). 974 * 975 * Also note that to prevent whole machine hangs on gen7, we have to 976 * take the spinlock to guard against concurrent cacheline access. 977 */ 978 spin_lock_irq(&dev_priv->uncore.lock); 979 POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); 980 spin_unlock_irq(&dev_priv->uncore.lock); 981 } 982 983 static void 984 gen5_irq_enable(struct intel_engine_cs *engine) 985 { 986 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); 987 } 988 989 static void 990 gen5_irq_disable(struct intel_engine_cs *engine) 991 { 992 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); 993 } 994 995 static void 996 i9xx_irq_enable(struct intel_engine_cs *engine) 997 { 998 struct drm_i915_private *dev_priv = engine->i915; 999 1000 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1001 I915_WRITE(IMR, dev_priv->irq_mask); 1002 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1003 } 1004 1005 static void 1006 i9xx_irq_disable(struct intel_engine_cs *engine) 1007 { 1008 struct drm_i915_private *dev_priv = engine->i915; 1009 1010 dev_priv->irq_mask |= engine->irq_enable_mask; 1011 I915_WRITE(IMR, dev_priv->irq_mask); 1012 } 1013 1014 static void 1015 i8xx_irq_enable(struct intel_engine_cs *engine) 1016 { 1017 struct drm_i915_private *dev_priv = engine->i915; 1018 1019 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1020 I915_WRITE16(IMR, dev_priv->irq_mask); 1021 POSTING_READ16(RING_IMR(engine->mmio_base)); 1022 } 1023 1024 static void 1025 i8xx_irq_disable(struct intel_engine_cs *engine) 1026 { 1027 struct drm_i915_private *dev_priv = engine->i915; 1028 1029 dev_priv->irq_mask |= engine->irq_enable_mask; 1030 I915_WRITE16(IMR, dev_priv->irq_mask); 1031 } 1032 1033 static int 1034 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1035 { 1036 u32 *cs; 1037 1038 cs = intel_ring_begin(req, 2); 1039 if (IS_ERR(cs)) 1040 return PTR_ERR(cs); 1041 1042 *cs++ = MI_FLUSH; 1043 *cs++ = MI_NOOP; 1044 intel_ring_advance(req, cs); 1045 return 0; 1046 } 1047 1048 static void 1049 gen6_irq_enable(struct intel_engine_cs *engine) 1050 { 1051 struct drm_i915_private *dev_priv = engine->i915; 1052 1053 I915_WRITE_IMR(engine, 1054 ~(engine->irq_enable_mask | 1055 engine->irq_keep_mask)); 1056 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1057 } 1058 1059 static void 1060 gen6_irq_disable(struct intel_engine_cs *engine) 1061 { 1062 struct drm_i915_private *dev_priv = engine->i915; 1063 1064 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1065 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1066 } 1067 1068 static void 1069 hsw_vebox_irq_enable(struct intel_engine_cs *engine) 1070 { 1071 struct drm_i915_private *dev_priv = engine->i915; 1072 1073 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1074 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask); 1075 } 1076 1077 static void 1078 hsw_vebox_irq_disable(struct intel_engine_cs *engine) 1079 { 1080 struct drm_i915_private *dev_priv = engine->i915; 1081 1082 I915_WRITE_IMR(engine, ~0); 1083 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask); 1084 } 1085 1086 static void 1087 gen8_irq_enable(struct intel_engine_cs *engine) 1088 { 1089 struct drm_i915_private *dev_priv = engine->i915; 1090 1091 I915_WRITE_IMR(engine, 1092 ~(engine->irq_enable_mask | 1093 engine->irq_keep_mask)); 1094 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1095 } 1096 1097 static void 1098 gen8_irq_disable(struct intel_engine_cs *engine) 1099 { 1100 struct drm_i915_private *dev_priv = engine->i915; 1101 1102 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1103 } 1104 1105 static int 1106 i965_emit_bb_start(struct drm_i915_gem_request *req, 1107 u64 offset, u32 length, 1108 unsigned int dispatch_flags) 1109 { 1110 u32 *cs; 1111 1112 cs = intel_ring_begin(req, 2); 1113 if (IS_ERR(cs)) 1114 return PTR_ERR(cs); 1115 1116 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & 1117 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965); 1118 *cs++ = offset; 1119 intel_ring_advance(req, cs); 1120 1121 return 0; 1122 } 1123 1124 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ 1125 #define I830_BATCH_LIMIT (256*1024) 1126 #define I830_TLB_ENTRIES (2) 1127 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 1128 static int 1129 i830_emit_bb_start(struct drm_i915_gem_request *req, 1130 u64 offset, u32 len, 1131 unsigned int dispatch_flags) 1132 { 1133 u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch); 1134 1135 cs = intel_ring_begin(req, 6); 1136 if (IS_ERR(cs)) 1137 return PTR_ERR(cs); 1138 1139 /* Evict the invalid PTE TLBs */ 1140 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA; 1141 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096; 1142 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */ 1143 *cs++ = cs_offset; 1144 *cs++ = 0xdeadbeef; 1145 *cs++ = MI_NOOP; 1146 intel_ring_advance(req, cs); 1147 1148 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 1149 if (len > I830_BATCH_LIMIT) 1150 return -ENOSPC; 1151 1152 cs = intel_ring_begin(req, 6 + 2); 1153 if (IS_ERR(cs)) 1154 return PTR_ERR(cs); 1155 1156 /* Blit the batch (which has now all relocs applied) to the 1157 * stable batch scratch bo area (so that the CS never 1158 * stumbles over its tlb invalidation bug) ... 1159 */ 1160 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA; 1161 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096; 1162 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096; 1163 *cs++ = cs_offset; 1164 *cs++ = 4096; 1165 *cs++ = offset; 1166 1167 *cs++ = MI_FLUSH; 1168 *cs++ = MI_NOOP; 1169 intel_ring_advance(req, cs); 1170 1171 /* ... and execute it. */ 1172 offset = cs_offset; 1173 } 1174 1175 cs = intel_ring_begin(req, 2); 1176 if (IS_ERR(cs)) 1177 return PTR_ERR(cs); 1178 1179 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1180 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 1181 MI_BATCH_NON_SECURE); 1182 intel_ring_advance(req, cs); 1183 1184 return 0; 1185 } 1186 1187 static int 1188 i915_emit_bb_start(struct drm_i915_gem_request *req, 1189 u64 offset, u32 len, 1190 unsigned int dispatch_flags) 1191 { 1192 u32 *cs; 1193 1194 cs = intel_ring_begin(req, 2); 1195 if (IS_ERR(cs)) 1196 return PTR_ERR(cs); 1197 1198 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1199 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 1200 MI_BATCH_NON_SECURE); 1201 intel_ring_advance(req, cs); 1202 1203 return 0; 1204 } 1205 1206 1207 1208 int intel_ring_pin(struct intel_ring *ring, 1209 struct drm_i915_private *i915, 1210 unsigned int offset_bias) 1211 { 1212 enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; 1213 struct i915_vma *vma = ring->vma; 1214 unsigned int flags; 1215 void *addr; 1216 int ret; 1217 1218 GEM_BUG_ON(ring->vaddr); 1219 1220 1221 flags = PIN_GLOBAL; 1222 if (offset_bias) 1223 flags |= PIN_OFFSET_BIAS | offset_bias; 1224 if (vma->obj->stolen) 1225 flags |= PIN_MAPPABLE; 1226 1227 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1228 if (flags & PIN_MAPPABLE || map == I915_MAP_WC) 1229 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); 1230 else 1231 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); 1232 if (unlikely(ret)) 1233 return ret; 1234 } 1235 1236 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); 1237 if (unlikely(ret)) 1238 return ret; 1239 1240 if (i915_vma_is_map_and_fenceable(vma)) 1241 addr = (void __force *)i915_vma_pin_iomap(vma); 1242 else 1243 addr = i915_gem_object_pin_map(vma->obj, map); 1244 if (IS_ERR(addr)) 1245 goto err; 1246 1247 vma->obj->pin_global++; 1248 1249 ring->vaddr = addr; 1250 return 0; 1251 1252 err: 1253 i915_vma_unpin(vma); 1254 return PTR_ERR(addr); 1255 } 1256 1257 void intel_ring_reset(struct intel_ring *ring, u32 tail) 1258 { 1259 GEM_BUG_ON(!list_empty(&ring->request_list)); 1260 ring->tail = tail; 1261 ring->head = tail; 1262 ring->emit = tail; 1263 intel_ring_update_space(ring); 1264 } 1265 1266 void intel_ring_unpin(struct intel_ring *ring) 1267 { 1268 GEM_BUG_ON(!ring->vma); 1269 GEM_BUG_ON(!ring->vaddr); 1270 1271 /* Discard any unused bytes beyond that submitted to hw. */ 1272 intel_ring_reset(ring, ring->tail); 1273 1274 if (i915_vma_is_map_and_fenceable(ring->vma)) 1275 i915_vma_unpin_iomap(ring->vma); 1276 else 1277 i915_gem_object_unpin_map(ring->vma->obj); 1278 ring->vaddr = NULL; 1279 1280 ring->vma->obj->pin_global--; 1281 i915_vma_unpin(ring->vma); 1282 } 1283 1284 static struct i915_vma * 1285 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) 1286 { 1287 struct drm_i915_gem_object *obj; 1288 struct i915_vma *vma; 1289 1290 obj = i915_gem_object_create_stolen(dev_priv, size); 1291 if (!obj) 1292 obj = i915_gem_object_create_internal(dev_priv, size); 1293 if (IS_ERR(obj)) 1294 return ERR_CAST(obj); 1295 1296 /* mark ring buffers as read-only from GPU side by default */ 1297 obj->gt_ro = 1; 1298 1299 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); 1300 if (IS_ERR(vma)) 1301 goto err; 1302 1303 return vma; 1304 1305 err: 1306 i915_gem_object_put(obj); 1307 return vma; 1308 } 1309 1310 struct intel_ring * 1311 intel_engine_create_ring(struct intel_engine_cs *engine, int size) 1312 { 1313 struct intel_ring *ring; 1314 struct i915_vma *vma; 1315 1316 GEM_BUG_ON(!is_power_of_2(size)); 1317 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); 1318 1319 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1320 if (!ring) 1321 return ERR_PTR(-ENOMEM); 1322 1323 INIT_LIST_HEAD(&ring->request_list); 1324 1325 ring->size = size; 1326 /* Workaround an erratum on the i830 which causes a hang if 1327 * the TAIL pointer points to within the last 2 cachelines 1328 * of the buffer. 1329 */ 1330 ring->effective_size = size; 1331 if (IS_I830(engine->i915) || IS_I845G(engine->i915)) 1332 ring->effective_size -= 2 * CACHELINE_BYTES; 1333 1334 intel_ring_update_space(ring); 1335 1336 vma = intel_ring_create_vma(engine->i915, size); 1337 if (IS_ERR(vma)) { 1338 kfree(ring); 1339 return ERR_CAST(vma); 1340 } 1341 ring->vma = vma; 1342 1343 return ring; 1344 } 1345 1346 void 1347 intel_ring_free(struct intel_ring *ring) 1348 { 1349 struct drm_i915_gem_object *obj = ring->vma->obj; 1350 1351 i915_vma_close(ring->vma); 1352 __i915_gem_object_release_unless_active(obj); 1353 1354 kfree(ring); 1355 } 1356 1357 static int context_pin(struct i915_gem_context *ctx) 1358 { 1359 struct i915_vma *vma = ctx->engine[RCS].state; 1360 int ret; 1361 1362 /* Clear this page out of any CPU caches for coherent swap-in/out. 1363 * We only want to do this on the first bind so that we do not stall 1364 * on an active context (which by nature is already on the GPU). 1365 */ 1366 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1367 ret = i915_gem_object_set_to_gtt_domain(vma->obj, false); 1368 if (ret) 1369 return ret; 1370 } 1371 1372 return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT, 1373 PIN_GLOBAL | PIN_HIGH); 1374 } 1375 1376 static struct i915_vma * 1377 alloc_context_vma(struct intel_engine_cs *engine) 1378 { 1379 struct drm_i915_private *i915 = engine->i915; 1380 struct drm_i915_gem_object *obj; 1381 struct i915_vma *vma; 1382 1383 obj = i915_gem_object_create(i915, engine->context_size); 1384 if (IS_ERR(obj)) 1385 return ERR_CAST(obj); 1386 1387 /* 1388 * Try to make the context utilize L3 as well as LLC. 1389 * 1390 * On VLV we don't have L3 controls in the PTEs so we 1391 * shouldn't touch the cache level, especially as that 1392 * would make the object snooped which might have a 1393 * negative performance impact. 1394 * 1395 * Snooping is required on non-llc platforms in execlist 1396 * mode, but since all GGTT accesses use PAT entry 0 we 1397 * get snooping anyway regardless of cache_level. 1398 * 1399 * This is only applicable for Ivy Bridge devices since 1400 * later platforms don't have L3 control bits in the PTE. 1401 */ 1402 if (IS_IVYBRIDGE(i915)) { 1403 /* Ignore any error, regard it as a simple optimisation */ 1404 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); 1405 } 1406 1407 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); 1408 if (IS_ERR(vma)) 1409 i915_gem_object_put(obj); 1410 1411 return vma; 1412 } 1413 1414 static struct intel_ring * 1415 intel_ring_context_pin(struct intel_engine_cs *engine, 1416 struct i915_gem_context *ctx) 1417 { 1418 struct intel_context *ce = &ctx->engine[engine->id]; 1419 int ret; 1420 1421 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1422 1423 if (likely(ce->pin_count++)) 1424 goto out; 1425 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ 1426 1427 if (!ce->state && engine->context_size) { 1428 struct i915_vma *vma; 1429 1430 vma = alloc_context_vma(engine); 1431 if (IS_ERR(vma)) { 1432 ret = PTR_ERR(vma); 1433 goto err; 1434 } 1435 1436 ce->state = vma; 1437 } 1438 1439 if (ce->state) { 1440 ret = context_pin(ctx); 1441 if (ret) 1442 goto err; 1443 1444 ce->state->obj->mm.dirty = true; 1445 ce->state->obj->pin_global++; 1446 } 1447 1448 /* The kernel context is only used as a placeholder for flushing the 1449 * active context. It is never used for submitting user rendering and 1450 * as such never requires the golden render context, and so we can skip 1451 * emitting it when we switch to the kernel context. This is required 1452 * as during eviction we cannot allocate and pin the renderstate in 1453 * order to initialise the context. 1454 */ 1455 if (i915_gem_context_is_kernel(ctx)) 1456 ce->initialised = true; 1457 1458 i915_gem_context_get(ctx); 1459 1460 out: 1461 /* One ringbuffer to rule them all */ 1462 return engine->buffer; 1463 1464 err: 1465 ce->pin_count = 0; 1466 return ERR_PTR(ret); 1467 } 1468 1469 static void intel_ring_context_unpin(struct intel_engine_cs *engine, 1470 struct i915_gem_context *ctx) 1471 { 1472 struct intel_context *ce = &ctx->engine[engine->id]; 1473 1474 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1475 GEM_BUG_ON(ce->pin_count == 0); 1476 1477 if (--ce->pin_count) 1478 return; 1479 1480 if (ce->state) { 1481 ce->state->obj->pin_global--; 1482 i915_vma_unpin(ce->state); 1483 } 1484 1485 i915_gem_context_put(ctx); 1486 } 1487 1488 static int intel_init_ring_buffer(struct intel_engine_cs *engine) 1489 { 1490 struct intel_ring *ring; 1491 int err; 1492 1493 intel_engine_setup_common(engine); 1494 1495 err = intel_engine_init_common(engine); 1496 if (err) 1497 goto err; 1498 1499 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); 1500 if (IS_ERR(ring)) { 1501 err = PTR_ERR(ring); 1502 goto err; 1503 } 1504 1505 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 1506 err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE); 1507 if (err) 1508 goto err_ring; 1509 1510 GEM_BUG_ON(engine->buffer); 1511 engine->buffer = ring; 1512 1513 return 0; 1514 1515 err_ring: 1516 intel_ring_free(ring); 1517 err: 1518 intel_engine_cleanup_common(engine); 1519 return err; 1520 } 1521 1522 void intel_engine_cleanup(struct intel_engine_cs *engine) 1523 { 1524 struct drm_i915_private *dev_priv = engine->i915; 1525 1526 WARN_ON(INTEL_GEN(dev_priv) > 2 && 1527 (I915_READ_MODE(engine) & MODE_IDLE) == 0); 1528 1529 intel_ring_unpin(engine->buffer); 1530 intel_ring_free(engine->buffer); 1531 1532 if (engine->cleanup) 1533 engine->cleanup(engine); 1534 1535 intel_engine_cleanup_common(engine); 1536 1537 dev_priv->engine[engine->id] = NULL; 1538 kfree(engine); 1539 } 1540 1541 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) 1542 { 1543 struct intel_engine_cs *engine; 1544 enum intel_engine_id id; 1545 1546 /* Restart from the beginning of the rings for convenience */ 1547 for_each_engine(engine, dev_priv, id) 1548 intel_ring_reset(engine->buffer, 0); 1549 } 1550 1551 static int ring_request_alloc(struct drm_i915_gem_request *request) 1552 { 1553 u32 *cs; 1554 1555 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count); 1556 1557 /* Flush enough space to reduce the likelihood of waiting after 1558 * we start building the request - in which case we will just 1559 * have to repeat work. 1560 */ 1561 request->reserved_space += LEGACY_REQUEST_SIZE; 1562 1563 cs = intel_ring_begin(request, 0); 1564 if (IS_ERR(cs)) 1565 return PTR_ERR(cs); 1566 1567 request->reserved_space -= LEGACY_REQUEST_SIZE; 1568 return 0; 1569 } 1570 1571 static noinline int wait_for_space(struct drm_i915_gem_request *req, 1572 unsigned int bytes) 1573 { 1574 struct intel_ring *ring = req->ring; 1575 struct drm_i915_gem_request *target; 1576 long timeout; 1577 1578 lockdep_assert_held(&req->i915->drm.struct_mutex); 1579 1580 if (intel_ring_update_space(ring) >= bytes) 1581 return 0; 1582 1583 /* 1584 * Space is reserved in the ringbuffer for finalising the request, 1585 * as that cannot be allowed to fail. During request finalisation, 1586 * reserved_space is set to 0 to stop the overallocation and the 1587 * assumption is that then we never need to wait (which has the 1588 * risk of failing with EINTR). 1589 * 1590 * See also i915_gem_request_alloc() and i915_add_request(). 1591 */ 1592 GEM_BUG_ON(!req->reserved_space); 1593 1594 list_for_each_entry(target, &ring->request_list, ring_link) { 1595 /* Would completion of this request free enough space? */ 1596 if (bytes <= __intel_ring_space(target->postfix, 1597 ring->emit, ring->size)) 1598 break; 1599 } 1600 1601 if (WARN_ON(&target->ring_link == &ring->request_list)) 1602 return -ENOSPC; 1603 1604 timeout = i915_wait_request(target, 1605 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 1606 MAX_SCHEDULE_TIMEOUT); 1607 if (timeout < 0) 1608 return timeout; 1609 1610 i915_gem_request_retire_upto(target); 1611 1612 intel_ring_update_space(ring); 1613 GEM_BUG_ON(ring->space < bytes); 1614 return 0; 1615 } 1616 1617 u32 *intel_ring_begin(struct drm_i915_gem_request *req, 1618 unsigned int num_dwords) 1619 { 1620 struct intel_ring *ring = req->ring; 1621 const unsigned int remain_usable = ring->effective_size - ring->emit; 1622 const unsigned int bytes = num_dwords * sizeof(u32); 1623 unsigned int need_wrap = 0; 1624 unsigned int total_bytes; 1625 u32 *cs; 1626 1627 /* Packets must be qword aligned. */ 1628 GEM_BUG_ON(num_dwords & 1); 1629 1630 total_bytes = bytes + req->reserved_space; 1631 GEM_BUG_ON(total_bytes > ring->effective_size); 1632 1633 if (unlikely(total_bytes > remain_usable)) { 1634 const int remain_actual = ring->size - ring->emit; 1635 1636 if (bytes > remain_usable) { 1637 /* 1638 * Not enough space for the basic request. So need to 1639 * flush out the remainder and then wait for 1640 * base + reserved. 1641 */ 1642 total_bytes += remain_actual; 1643 need_wrap = remain_actual | 1; 1644 } else { 1645 /* 1646 * The base request will fit but the reserved space 1647 * falls off the end. So we don't need an immediate 1648 * wrap and only need to effectively wait for the 1649 * reserved size from the start of ringbuffer. 1650 */ 1651 total_bytes = req->reserved_space + remain_actual; 1652 } 1653 } 1654 1655 if (unlikely(total_bytes > ring->space)) { 1656 int ret = wait_for_space(req, total_bytes); 1657 if (unlikely(ret)) 1658 return ERR_PTR(ret); 1659 } 1660 1661 if (unlikely(need_wrap)) { 1662 need_wrap &= ~1; 1663 GEM_BUG_ON(need_wrap > ring->space); 1664 GEM_BUG_ON(ring->emit + need_wrap > ring->size); 1665 1666 /* Fill the tail with MI_NOOP */ 1667 memset(ring->vaddr + ring->emit, 0, need_wrap); 1668 ring->emit = 0; 1669 ring->space -= need_wrap; 1670 } 1671 1672 GEM_BUG_ON(ring->emit > ring->size - bytes); 1673 GEM_BUG_ON(ring->space < bytes); 1674 cs = ring->vaddr + ring->emit; 1675 GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes)); 1676 ring->emit += bytes; 1677 ring->space -= bytes; 1678 1679 return cs; 1680 } 1681 1682 /* Align the ring tail to a cacheline boundary */ 1683 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1684 { 1685 int num_dwords = 1686 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1687 u32 *cs; 1688 1689 if (num_dwords == 0) 1690 return 0; 1691 1692 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 1693 cs = intel_ring_begin(req, num_dwords); 1694 if (IS_ERR(cs)) 1695 return PTR_ERR(cs); 1696 1697 while (num_dwords--) 1698 *cs++ = MI_NOOP; 1699 1700 intel_ring_advance(req, cs); 1701 1702 return 0; 1703 } 1704 1705 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) 1706 { 1707 struct drm_i915_private *dev_priv = request->i915; 1708 1709 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1710 1711 /* Every tail move must follow the sequence below */ 1712 1713 /* Disable notification that the ring is IDLE. The GT 1714 * will then assume that it is busy and bring it out of rc6. 1715 */ 1716 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 1717 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1718 1719 /* Clear the context id. Here be magic! */ 1720 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 1721 1722 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 1723 if (__intel_wait_for_register_fw(dev_priv, 1724 GEN6_BSD_SLEEP_PSMI_CONTROL, 1725 GEN6_BSD_SLEEP_INDICATOR, 1726 0, 1727 1000, 0, NULL)) 1728 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 1729 1730 /* Now that the ring is fully powered up, update the tail */ 1731 i9xx_submit_request(request); 1732 1733 /* Let the ring send IDLE messages to the GT again, 1734 * and so let it sleep to conserve power when idle. 1735 */ 1736 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 1737 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1738 1739 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1740 } 1741 1742 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1743 { 1744 u32 cmd, *cs; 1745 1746 cs = intel_ring_begin(req, 4); 1747 if (IS_ERR(cs)) 1748 return PTR_ERR(cs); 1749 1750 cmd = MI_FLUSH_DW; 1751 if (INTEL_GEN(req->i915) >= 8) 1752 cmd += 1; 1753 1754 /* We always require a command barrier so that subsequent 1755 * commands, such as breadcrumb interrupts, are strictly ordered 1756 * wrt the contents of the write cache being flushed to memory 1757 * (and thus being coherent from the CPU). 1758 */ 1759 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1760 1761 /* 1762 * Bspec vol 1c.5 - video engine command streamer: 1763 * "If ENABLED, all TLBs will be invalidated once the flush 1764 * operation is complete. This bit is only valid when the 1765 * Post-Sync Operation field is a value of 1h or 3h." 1766 */ 1767 if (mode & EMIT_INVALIDATE) 1768 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 1769 1770 *cs++ = cmd; 1771 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1772 if (INTEL_GEN(req->i915) >= 8) { 1773 *cs++ = 0; /* upper addr */ 1774 *cs++ = 0; /* value */ 1775 } else { 1776 *cs++ = 0; 1777 *cs++ = MI_NOOP; 1778 } 1779 intel_ring_advance(req, cs); 1780 return 0; 1781 } 1782 1783 static int 1784 gen8_emit_bb_start(struct drm_i915_gem_request *req, 1785 u64 offset, u32 len, 1786 unsigned int dispatch_flags) 1787 { 1788 bool ppgtt = USES_PPGTT(req->i915) && 1789 !(dispatch_flags & I915_DISPATCH_SECURE); 1790 u32 *cs; 1791 1792 cs = intel_ring_begin(req, 4); 1793 if (IS_ERR(cs)) 1794 return PTR_ERR(cs); 1795 1796 /* FIXME(BDW): Address space and security selectors. */ 1797 *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags & 1798 I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0); 1799 *cs++ = lower_32_bits(offset); 1800 *cs++ = upper_32_bits(offset); 1801 *cs++ = MI_NOOP; 1802 intel_ring_advance(req, cs); 1803 1804 return 0; 1805 } 1806 1807 static int 1808 hsw_emit_bb_start(struct drm_i915_gem_request *req, 1809 u64 offset, u32 len, 1810 unsigned int dispatch_flags) 1811 { 1812 u32 *cs; 1813 1814 cs = intel_ring_begin(req, 2); 1815 if (IS_ERR(cs)) 1816 return PTR_ERR(cs); 1817 1818 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 1819 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 1820 (dispatch_flags & I915_DISPATCH_RS ? 1821 MI_BATCH_RESOURCE_STREAMER : 0); 1822 /* bit0-7 is the length on GEN6+ */ 1823 *cs++ = offset; 1824 intel_ring_advance(req, cs); 1825 1826 return 0; 1827 } 1828 1829 static int 1830 gen6_emit_bb_start(struct drm_i915_gem_request *req, 1831 u64 offset, u32 len, 1832 unsigned int dispatch_flags) 1833 { 1834 u32 *cs; 1835 1836 cs = intel_ring_begin(req, 2); 1837 if (IS_ERR(cs)) 1838 return PTR_ERR(cs); 1839 1840 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 1841 0 : MI_BATCH_NON_SECURE_I965); 1842 /* bit0-7 is the length on GEN6+ */ 1843 *cs++ = offset; 1844 intel_ring_advance(req, cs); 1845 1846 return 0; 1847 } 1848 1849 /* Blitter support (SandyBridge+) */ 1850 1851 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1852 { 1853 u32 cmd, *cs; 1854 1855 cs = intel_ring_begin(req, 4); 1856 if (IS_ERR(cs)) 1857 return PTR_ERR(cs); 1858 1859 cmd = MI_FLUSH_DW; 1860 if (INTEL_GEN(req->i915) >= 8) 1861 cmd += 1; 1862 1863 /* We always require a command barrier so that subsequent 1864 * commands, such as breadcrumb interrupts, are strictly ordered 1865 * wrt the contents of the write cache being flushed to memory 1866 * (and thus being coherent from the CPU). 1867 */ 1868 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1869 1870 /* 1871 * Bspec vol 1c.3 - blitter engine command streamer: 1872 * "If ENABLED, all TLBs will be invalidated once the flush 1873 * operation is complete. This bit is only valid when the 1874 * Post-Sync Operation field is a value of 1h or 3h." 1875 */ 1876 if (mode & EMIT_INVALIDATE) 1877 cmd |= MI_INVALIDATE_TLB; 1878 *cs++ = cmd; 1879 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1880 if (INTEL_GEN(req->i915) >= 8) { 1881 *cs++ = 0; /* upper addr */ 1882 *cs++ = 0; /* value */ 1883 } else { 1884 *cs++ = 0; 1885 *cs++ = MI_NOOP; 1886 } 1887 intel_ring_advance(req, cs); 1888 1889 return 0; 1890 } 1891 1892 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, 1893 struct intel_engine_cs *engine) 1894 { 1895 struct drm_i915_gem_object *obj; 1896 int ret, i; 1897 1898 if (!i915_modparams.semaphores) 1899 return; 1900 1901 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { 1902 struct i915_vma *vma; 1903 1904 obj = i915_gem_object_create(dev_priv, PAGE_SIZE); 1905 if (IS_ERR(obj)) 1906 goto err; 1907 1908 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); 1909 if (IS_ERR(vma)) 1910 goto err_obj; 1911 1912 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1913 if (ret) 1914 goto err_obj; 1915 1916 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 1917 if (ret) 1918 goto err_obj; 1919 1920 dev_priv->semaphore = vma; 1921 } 1922 1923 if (INTEL_GEN(dev_priv) >= 8) { 1924 u32 offset = i915_ggtt_offset(dev_priv->semaphore); 1925 1926 engine->semaphore.sync_to = gen8_ring_sync_to; 1927 engine->semaphore.signal = gen8_xcs_signal; 1928 1929 for (i = 0; i < I915_NUM_ENGINES; i++) { 1930 u32 ring_offset; 1931 1932 if (i != engine->id) 1933 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); 1934 else 1935 ring_offset = MI_SEMAPHORE_SYNC_INVALID; 1936 1937 engine->semaphore.signal_ggtt[i] = ring_offset; 1938 } 1939 } else if (INTEL_GEN(dev_priv) >= 6) { 1940 engine->semaphore.sync_to = gen6_ring_sync_to; 1941 engine->semaphore.signal = gen6_signal; 1942 1943 /* 1944 * The current semaphore is only applied on pre-gen8 1945 * platform. And there is no VCS2 ring on the pre-gen8 1946 * platform. So the semaphore between RCS and VCS2 is 1947 * initialized as INVALID. Gen8 will initialize the 1948 * sema between VCS2 and RCS later. 1949 */ 1950 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { 1951 static const struct { 1952 u32 wait_mbox; 1953 i915_reg_t mbox_reg; 1954 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { 1955 [RCS_HW] = { 1956 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, 1957 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, 1958 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, 1959 }, 1960 [VCS_HW] = { 1961 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, 1962 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, 1963 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, 1964 }, 1965 [BCS_HW] = { 1966 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, 1967 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, 1968 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, 1969 }, 1970 [VECS_HW] = { 1971 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, 1972 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, 1973 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, 1974 }, 1975 }; 1976 u32 wait_mbox; 1977 i915_reg_t mbox_reg; 1978 1979 if (i == engine->hw_id) { 1980 wait_mbox = MI_SEMAPHORE_SYNC_INVALID; 1981 mbox_reg = GEN6_NOSYNC; 1982 } else { 1983 wait_mbox = sem_data[engine->hw_id][i].wait_mbox; 1984 mbox_reg = sem_data[engine->hw_id][i].mbox_reg; 1985 } 1986 1987 engine->semaphore.mbox.wait[i] = wait_mbox; 1988 engine->semaphore.mbox.signal[i] = mbox_reg; 1989 } 1990 } 1991 1992 return; 1993 1994 err_obj: 1995 i915_gem_object_put(obj); 1996 err: 1997 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); 1998 i915_modparams.semaphores = 0; 1999 } 2000 2001 static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2002 struct intel_engine_cs *engine) 2003 { 2004 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift; 2005 2006 if (INTEL_GEN(dev_priv) >= 8) { 2007 engine->irq_enable = gen8_irq_enable; 2008 engine->irq_disable = gen8_irq_disable; 2009 engine->irq_seqno_barrier = gen6_seqno_barrier; 2010 } else if (INTEL_GEN(dev_priv) >= 6) { 2011 engine->irq_enable = gen6_irq_enable; 2012 engine->irq_disable = gen6_irq_disable; 2013 engine->irq_seqno_barrier = gen6_seqno_barrier; 2014 } else if (INTEL_GEN(dev_priv) >= 5) { 2015 engine->irq_enable = gen5_irq_enable; 2016 engine->irq_disable = gen5_irq_disable; 2017 engine->irq_seqno_barrier = gen5_seqno_barrier; 2018 } else if (INTEL_GEN(dev_priv) >= 3) { 2019 engine->irq_enable = i9xx_irq_enable; 2020 engine->irq_disable = i9xx_irq_disable; 2021 } else { 2022 engine->irq_enable = i8xx_irq_enable; 2023 engine->irq_disable = i8xx_irq_disable; 2024 } 2025 } 2026 2027 static void i9xx_set_default_submission(struct intel_engine_cs *engine) 2028 { 2029 engine->submit_request = i9xx_submit_request; 2030 engine->cancel_requests = cancel_requests; 2031 } 2032 2033 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) 2034 { 2035 engine->submit_request = gen6_bsd_submit_request; 2036 engine->cancel_requests = cancel_requests; 2037 } 2038 2039 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2040 struct intel_engine_cs *engine) 2041 { 2042 intel_ring_init_irq(dev_priv, engine); 2043 intel_ring_init_semaphores(dev_priv, engine); 2044 2045 engine->init_hw = init_ring_common; 2046 engine->reset_hw = reset_ring_common; 2047 2048 engine->context_pin = intel_ring_context_pin; 2049 engine->context_unpin = intel_ring_context_unpin; 2050 2051 engine->request_alloc = ring_request_alloc; 2052 2053 engine->emit_breadcrumb = i9xx_emit_breadcrumb; 2054 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; 2055 if (i915_modparams.semaphores) { 2056 int num_rings; 2057 2058 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; 2059 2060 num_rings = INTEL_INFO(dev_priv)->num_rings - 1; 2061 if (INTEL_GEN(dev_priv) >= 8) { 2062 engine->emit_breadcrumb_sz += num_rings * 6; 2063 } else { 2064 engine->emit_breadcrumb_sz += num_rings * 3; 2065 if (num_rings & 1) 2066 engine->emit_breadcrumb_sz++; 2067 } 2068 } 2069 2070 engine->set_default_submission = i9xx_set_default_submission; 2071 2072 if (INTEL_GEN(dev_priv) >= 8) 2073 engine->emit_bb_start = gen8_emit_bb_start; 2074 else if (INTEL_GEN(dev_priv) >= 6) 2075 engine->emit_bb_start = gen6_emit_bb_start; 2076 else if (INTEL_GEN(dev_priv) >= 4) 2077 engine->emit_bb_start = i965_emit_bb_start; 2078 else if (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2079 engine->emit_bb_start = i830_emit_bb_start; 2080 else 2081 engine->emit_bb_start = i915_emit_bb_start; 2082 } 2083 2084 int intel_init_render_ring_buffer(struct intel_engine_cs *engine) 2085 { 2086 struct drm_i915_private *dev_priv = engine->i915; 2087 int ret; 2088 2089 intel_ring_default_vfuncs(dev_priv, engine); 2090 2091 if (HAS_L3_DPF(dev_priv)) 2092 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2093 2094 if (INTEL_GEN(dev_priv) >= 8) { 2095 engine->init_context = intel_rcs_ctx_init; 2096 engine->emit_breadcrumb = gen8_render_emit_breadcrumb; 2097 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; 2098 engine->emit_flush = gen8_render_ring_flush; 2099 if (i915_modparams.semaphores) { 2100 int num_rings; 2101 2102 engine->semaphore.signal = gen8_rcs_signal; 2103 2104 num_rings = INTEL_INFO(dev_priv)->num_rings - 1; 2105 engine->emit_breadcrumb_sz += num_rings * 8; 2106 } 2107 } else if (INTEL_GEN(dev_priv) >= 6) { 2108 engine->init_context = intel_rcs_ctx_init; 2109 engine->emit_flush = gen7_render_ring_flush; 2110 if (IS_GEN6(dev_priv)) 2111 engine->emit_flush = gen6_render_ring_flush; 2112 } else if (IS_GEN5(dev_priv)) { 2113 engine->emit_flush = gen4_render_ring_flush; 2114 } else { 2115 if (INTEL_GEN(dev_priv) < 4) 2116 engine->emit_flush = gen2_render_ring_flush; 2117 else 2118 engine->emit_flush = gen4_render_ring_flush; 2119 engine->irq_enable_mask = I915_USER_INTERRUPT; 2120 } 2121 2122 if (IS_HASWELL(dev_priv)) 2123 engine->emit_bb_start = hsw_emit_bb_start; 2124 2125 engine->init_hw = init_render_ring; 2126 engine->cleanup = render_ring_cleanup; 2127 2128 ret = intel_init_ring_buffer(engine); 2129 if (ret) 2130 return ret; 2131 2132 if (INTEL_GEN(dev_priv) >= 6) { 2133 ret = intel_engine_create_scratch(engine, PAGE_SIZE); 2134 if (ret) 2135 return ret; 2136 } else if (HAS_BROKEN_CS_TLB(dev_priv)) { 2137 ret = intel_engine_create_scratch(engine, I830_WA_SIZE); 2138 if (ret) 2139 return ret; 2140 } 2141 2142 return 0; 2143 } 2144 2145 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) 2146 { 2147 struct drm_i915_private *dev_priv = engine->i915; 2148 2149 intel_ring_default_vfuncs(dev_priv, engine); 2150 2151 if (INTEL_GEN(dev_priv) >= 6) { 2152 /* gen6 bsd needs a special wa for tail updates */ 2153 if (IS_GEN6(dev_priv)) 2154 engine->set_default_submission = gen6_bsd_set_default_submission; 2155 engine->emit_flush = gen6_bsd_ring_flush; 2156 if (INTEL_GEN(dev_priv) < 8) 2157 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2158 } else { 2159 engine->mmio_base = BSD_RING_BASE; 2160 engine->emit_flush = bsd_ring_flush; 2161 if (IS_GEN5(dev_priv)) 2162 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2163 else 2164 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2165 } 2166 2167 return intel_init_ring_buffer(engine); 2168 } 2169 2170 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) 2171 { 2172 struct drm_i915_private *dev_priv = engine->i915; 2173 2174 intel_ring_default_vfuncs(dev_priv, engine); 2175 2176 engine->emit_flush = gen6_ring_flush; 2177 if (INTEL_GEN(dev_priv) < 8) 2178 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2179 2180 return intel_init_ring_buffer(engine); 2181 } 2182 2183 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) 2184 { 2185 struct drm_i915_private *dev_priv = engine->i915; 2186 2187 intel_ring_default_vfuncs(dev_priv, engine); 2188 2189 engine->emit_flush = gen6_ring_flush; 2190 2191 if (INTEL_GEN(dev_priv) < 8) { 2192 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2193 engine->irq_enable = hsw_vebox_irq_enable; 2194 engine->irq_disable = hsw_vebox_irq_disable; 2195 } 2196 2197 return intel_init_ring_buffer(engine); 2198 } 2199