/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev)
		return false;

	if (i915.enable_execlists) {
		struct intel_context *dctx = ring->default_context;
		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;

		return ringbuf->obj;
	} else
		return ring->buffer && ring->buffer->obj;
}

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
	intel_ring_update_space(ringbuf);
	return ringbuf->space;
}

bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

static void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

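/*
 * Worked example of the ring-space arithmetic in __intel_ring_space()
 * above, using hypothetical values: with a 4096-byte ring, head == 512
 * and tail == 3584 give head - tail == -3072, which is <= 0, so the ring
 * size is added back, leaving 1024 bytes between tail and head.
 * I915_RING_FREE_SPACE is then subtracted, so the reported free space
 * always keeps a small reserve and the tail never catches up with the
 * head exactly.
 */
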
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
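 *
 * Hence intel_emit_post_sync_nonzero_flush() below first emits a
 * PIPE_CONTROL with only CS_STALL and STALL_AT_SCOREBOARD set, and then
 * one with a QW_WRITE post-sync op targeting the scratch page.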
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
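		 * The call to gen7_render_ring_cs_stall_wa() just below
		 * provides that extra CS-stall PIPE_CONTROL before the main
		 * flush is emitted.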
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7.
		 * It is listed here only to silence gcc's switch check
		 * warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
				return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(ring))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  ring->name, I915_READ_HEAD(ring));
	I915_WRITE_HEAD(ring, 0);
	(void)I915_READ_HEAD(ring);

	I915_WRITE_CTL(ring,
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->last_retired_head = -1;
	ringbuf->head = I915_READ_HEAD(ring);
	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	intel_ring_update_space(ringbuf);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	WARN_ON(ring->scratch.obj);

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (WARN_ON_ONCE(w->count == 0))
		return 0;

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		DRM_ERROR("init render state: %d\n", ret);

	return ret;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  const u32 addr, const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw */
	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceEnableNonCoherent:bdw */
			  HDC_FORCE_NON_COHERENT |
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaHdcDisableFetchWhenMasked:bdw */
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for Broadwell; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
			  STALL_DOP_GATING_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:chv */
	/* WaHdcDisableFetchWhenMasked:chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT |
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED);

	/* According to the CACHE_MODE_0 default value documentation, some
	 * CHV platforms disable this optimization by default. Turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* WaDisablePartialInstShootdown:skl,bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
	    INTEL_REVID(dev) == SKL_REVID_B0)) ||
	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);
	}

	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
	    IS_BROXTON(dev)) {
		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_ENABLE_YV12_BUGFIX);
	}

	/* Wa4x4STCOptimizationDisable:skl,bxt */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/* WaDisablePartialResolveInVc:skl,bxt */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing.
	 * See intel_device_info_runtime_init()
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_workarounds(ring);

	/* WaDisablePowerCompilerClockGating:skl */
	if (INTEL_REVID(dev) == SKL_REVID_B0)
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/*
		 * Use Force Non-Coherent whenever executing a 3D context. This
		 * is a workaround for a possible hang in the unlikely event
		 * a TLB invalidation occurs during a PSD flush.
		 */
		/* WaForceEnableNonCoherent:skl */
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FORCE_NON_COHERENT);
	}

	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
	    INTEL_REVID(dev) == SKL_REVID_D0)
		/* WaBarrierPerformanceFixDisable:skl */
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	return skl_tune_iz_hashing(ring);
}

static int bxt_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_workarounds(ring);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(ring->id != RCS);

	dev_priv->workarounds.count = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(ring);

	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(ring);

	if (IS_SKYLAKE(dev))
		return skl_init_workarounds(ring);

	if (IS_BROXTON(dev))
		return bxt_init_workarounds(ring);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 * policy. [...] This bit must be reset. LRA replacement
		 * policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return init_workarounds_ring(ring);
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(ring);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
{
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			u32 seqno = i915_gem_request_get_seqno(signaller_req);
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(req, 4);
	else
		ret = intel_ring_begin(req, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->ring;
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
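	 * The explicit decrement just below converts the GEM ">=" convention
	 * into the hardware's strictly-greater-than comparison.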
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
			PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to work around the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(req, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
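	 *
	 * The !lazy_coherency path below does exactly that, using a posting
	 * read of RING_ACTHD before the status-page read.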
	 */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static int
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 cs_offset = ring->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which now has all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
1845 0 : MI_BATCH_NON_SECURE)); 1846 intel_ring_emit(ring, offset + len - 8); 1847 intel_ring_emit(ring, MI_NOOP); 1848 intel_ring_advance(ring); 1849 1850 return 0; 1851 } 1852 1853 static int 1854 i915_dispatch_execbuffer(struct drm_i915_gem_request *req, 1855 u64 offset, u32 len, 1856 unsigned dispatch_flags) 1857 { 1858 struct intel_engine_cs *ring = req->ring; 1859 int ret; 1860 1861 ret = intel_ring_begin(req, 2); 1862 if (ret) 1863 return ret; 1864 1865 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1866 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 1867 0 : MI_BATCH_NON_SECURE)); 1868 intel_ring_advance(ring); 1869 1870 return 0; 1871 } 1872 1873 static void cleanup_status_page(struct intel_engine_cs *ring) 1874 { 1875 struct drm_i915_gem_object *obj; 1876 1877 obj = ring->status_page.obj; 1878 if (obj == NULL) 1879 return; 1880 1881 kunmap(sg_page(obj->pages->sgl)); 1882 i915_gem_object_ggtt_unpin(obj); 1883 drm_gem_object_unreference(&obj->base); 1884 ring->status_page.obj = NULL; 1885 } 1886 1887 static int init_status_page(struct intel_engine_cs *ring) 1888 { 1889 struct drm_i915_gem_object *obj; 1890 1891 if ((obj = ring->status_page.obj) == NULL) { 1892 unsigned flags; 1893 int ret; 1894 1895 obj = i915_gem_alloc_object(ring->dev, 4096); 1896 if (obj == NULL) { 1897 DRM_ERROR("Failed to allocate status page\n"); 1898 return -ENOMEM; 1899 } 1900 1901 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1902 if (ret) 1903 goto err_unref; 1904 1905 flags = 0; 1906 if (!HAS_LLC(ring->dev)) 1907 /* On g33, we cannot place HWS above 256MiB, so 1908 * restrict its pinning to the low mappable arena. 1909 * Though this restriction is not documented for 1910 * gen4, gen5, or byt, they also behave similarly 1911 * and hang if the HWS is placed at the top of the 1912 * GTT. To generalise, it appears that all !llc 1913 * platforms have issues with us placing the HWS 1914 * above the mappable region (even though we never 1915 * actually map it).
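			 *
			 * Rough illustration, assuming the usual 256MiB
			 * mappable aperture: the PIN_MAPPABLE flag set just
			 * below makes i915_gem_obj_ggtt_pin() place this 4KiB
			 * status page somewhere under that 256MiB boundary,
			 * rather than wherever the allocator would otherwise
			 * drop it in the full GGTT.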
1916 */ 1917 flags |= PIN_MAPPABLE; 1918 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); 1919 if (ret) { 1920 err_unref: 1921 drm_gem_object_unreference(&obj->base); 1922 return ret; 1923 } 1924 1925 ring->status_page.obj = obj; 1926 } 1927 1928 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1929 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1930 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1931 1932 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1933 ring->name, ring->status_page.gfx_addr); 1934 1935 return 0; 1936 } 1937 1938 static int init_phys_status_page(struct intel_engine_cs *ring) 1939 { 1940 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1941 1942 if (!dev_priv->status_page_dmah) { 1943 dev_priv->status_page_dmah = 1944 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); 1945 if (!dev_priv->status_page_dmah) 1946 return -ENOMEM; 1947 } 1948 1949 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1950 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1951 1952 return 0; 1953 } 1954 1955 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1956 { 1957 iounmap(ringbuf->virtual_start); 1958 ringbuf->virtual_start = NULL; 1959 i915_gem_object_ggtt_unpin(ringbuf->obj); 1960 } 1961 1962 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 1963 struct intel_ringbuffer *ringbuf) 1964 { 1965 struct drm_i915_private *dev_priv = to_i915(dev); 1966 struct drm_i915_gem_object *obj = ringbuf->obj; 1967 int ret; 1968 1969 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1970 if (ret) 1971 return ret; 1972 1973 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1974 if (ret) { 1975 i915_gem_object_ggtt_unpin(obj); 1976 return ret; 1977 } 1978 1979 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + 1980 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 1981 if (ringbuf->virtual_start == NULL) { 1982 i915_gem_object_ggtt_unpin(obj); 1983 return -EINVAL; 1984 } 1985 1986 return 0; 1987 } 1988 1989 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1990 { 1991 drm_gem_object_unreference(&ringbuf->obj->base); 1992 ringbuf->obj = NULL; 1993 } 1994 1995 int intel_alloc_ringbuffer_obj(struct drm_device *dev, 1996 struct intel_ringbuffer *ringbuf) 1997 { 1998 struct drm_i915_gem_object *obj; 1999 2000 obj = NULL; 2001 if (!HAS_LLC(dev)) 2002 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2003 if (obj == NULL) 2004 obj = i915_gem_alloc_object(dev, ringbuf->size); 2005 if (obj == NULL) 2006 return -ENOMEM; 2007 2008 /* mark ring buffers as read-only from GPU side by default */ 2009 obj->gt_ro = 1; 2010 2011 ringbuf->obj = obj; 2012 2013 return 0; 2014 } 2015 2016 static int intel_init_ring_buffer(struct drm_device *dev, 2017 struct intel_engine_cs *ring) 2018 { 2019 struct intel_ringbuffer *ringbuf; 2020 int ret; 2021 2022 WARN_ON(ring->buffer); 2023 2024 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); 2025 if (!ringbuf) 2026 return -ENOMEM; 2027 ring->buffer = ringbuf; 2028 2029 ring->dev = dev; 2030 INIT_LIST_HEAD(&ring->active_list); 2031 INIT_LIST_HEAD(&ring->request_list); 2032 INIT_LIST_HEAD(&ring->execlist_queue); 2033 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2034 ringbuf->size = 32 * PAGE_SIZE; 2035 ringbuf->ring = ring; 2036 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 2037 2038 init_waitqueue_head(&ring->irq_queue); 2039 2040 if (I915_NEED_GFX_HWS(dev)) { 2041 ret = init_status_page(ring); 2042 if (ret) 2043 goto error; 2044 } else { 
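		/* No GTT-backed hardware status page on this platform, so
		 * fall back to a physically addressed DMA page; only the
		 * render ring is expected to take this path (hence the
		 * BUG_ON just below).
		 */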
2045 BUG_ON(ring->id != RCS); 2046 ret = init_phys_status_page(ring); 2047 if (ret) 2048 goto error; 2049 } 2050 2051 WARN_ON(ringbuf->obj); 2052 2053 ret = intel_alloc_ringbuffer_obj(dev, ringbuf); 2054 if (ret) { 2055 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", 2056 ring->name, ret); 2057 goto error; 2058 } 2059 2060 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2061 if (ret) { 2062 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2063 ring->name, ret); 2064 intel_destroy_ringbuffer_obj(ringbuf); 2065 goto error; 2066 } 2067 2068 /* Workaround an erratum on the i830 which causes a hang if 2069 * the TAIL pointer points to within the last 2 cachelines 2070 * of the buffer. 2071 */ 2072 ringbuf->effective_size = ringbuf->size; 2073 if (IS_I830(dev) || IS_845G(dev)) 2074 ringbuf->effective_size -= 2 * CACHELINE_BYTES; 2075 2076 ret = i915_cmd_parser_init_ring(ring); 2077 if (ret) 2078 goto error; 2079 2080 return 0; 2081 2082 error: 2083 kfree(ringbuf); 2084 ring->buffer = NULL; 2085 return ret; 2086 } 2087 2088 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 2089 { 2090 struct drm_i915_private *dev_priv; 2091 struct intel_ringbuffer *ringbuf; 2092 2093 if (!intel_ring_initialized(ring)) 2094 return; 2095 2096 dev_priv = to_i915(ring->dev); 2097 ringbuf = ring->buffer; 2098 2099 intel_stop_ring_buffer(ring); 2100 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 2101 2102 intel_unpin_ringbuffer_obj(ringbuf); 2103 intel_destroy_ringbuffer_obj(ringbuf); 2104 2105 if (ring->cleanup) 2106 ring->cleanup(ring); 2107 2108 cleanup_status_page(ring); 2109 2110 i915_cmd_parser_fini_ring(ring); 2111 i915_gem_batch_pool_fini(&ring->batch_pool); 2112 2113 kfree(ringbuf); 2114 ring->buffer = NULL; 2115 } 2116 2117 static int ring_wait_for_space(struct intel_engine_cs *ring, int n) 2118 { 2119 struct intel_ringbuffer *ringbuf = ring->buffer; 2120 struct drm_i915_gem_request *request; 2121 unsigned space; 2122 int ret; 2123 2124 if (intel_ring_space(ringbuf) >= n) 2125 return 0; 2126 2127 /* The whole point of reserving space is to not wait! 
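	 * Ending up in here with the reservation already marked in-use
	 * means the request needed more ring space than it reserved up
	 * front, which is what the WARN_ON just below is catching.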
*/ 2128 WARN_ON(ringbuf->reserved_in_use); 2129 2130 list_for_each_entry(request, &ring->request_list, list) { 2131 space = __intel_ring_space(request->postfix, ringbuf->tail, 2132 ringbuf->size); 2133 if (space >= n) 2134 break; 2135 } 2136 2137 if (WARN_ON(&request->list == &ring->request_list)) 2138 return -ENOSPC; 2139 2140 ret = i915_wait_request(request); 2141 if (ret) 2142 return ret; 2143 2144 ringbuf->space = space; 2145 return 0; 2146 } 2147 2148 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) 2149 { 2150 uint32_t __iomem *virt; 2151 int rem = ringbuf->size - ringbuf->tail; 2152 2153 virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail); 2154 rem /= 4; 2155 while (rem--) 2156 iowrite32(MI_NOOP, virt++); 2157 2158 ringbuf->tail = 0; 2159 intel_ring_update_space(ringbuf); 2160 } 2161 2162 int intel_ring_idle(struct intel_engine_cs *ring) 2163 { 2164 struct drm_i915_gem_request *req; 2165 2166 /* Wait upon the last request to be completed */ 2167 if (list_empty(&ring->request_list)) 2168 return 0; 2169 2170 req = list_entry(ring->request_list.prev, 2171 struct drm_i915_gem_request, 2172 list); 2173 2174 /* Make sure we do not trigger any retires */ 2175 return __i915_wait_request(req, 2176 atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter), 2177 to_i915(ring->dev)->mm.interruptible, 2178 NULL, NULL); 2179 } 2180 2181 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2182 { 2183 request->ringbuf = request->ring->buffer; 2184 return 0; 2185 } 2186 2187 int intel_ring_reserve_space(struct drm_i915_gem_request *request) 2188 { 2189 /* 2190 * The first call merely notes the reserve request and is common for 2191 * all back ends. The subsequent localised _begin() call actually 2192 * ensures that the reservation is available. Without the begin, if 2193 * the request creator immediately submitted the request without 2194 * adding any commands to it then there might not actually be 2195 * sufficient room for the submission commands. 2196 */ 2197 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 2198 2199 return intel_ring_begin(request, 0); 2200 } 2201 2202 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) 2203 { 2204 WARN_ON(ringbuf->reserved_size); 2205 WARN_ON(ringbuf->reserved_in_use); 2206 2207 ringbuf->reserved_size = size; 2208 } 2209 2210 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) 2211 { 2212 WARN_ON(ringbuf->reserved_in_use); 2213 2214 ringbuf->reserved_size = 0; 2215 ringbuf->reserved_in_use = false; 2216 } 2217 2218 void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2219 { 2220 WARN_ON(ringbuf->reserved_in_use); 2221 2222 ringbuf->reserved_in_use = true; 2223 ringbuf->reserved_tail = ringbuf->tail; 2224 } 2225 2226 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2227 { 2228 WARN_ON(!ringbuf->reserved_in_use); 2229 if (ringbuf->tail > ringbuf->reserved_tail) { 2230 WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, 2231 "request reserved size too small: %d vs %d!\n", 2232 ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); 2233 } else { 2234 /* 2235 * The ring was wrapped while the reserved space was in use. 2236 * That means that some unknown amount of the ring tail was 2237 * no-op filled and skipped. Thus simply adding the ring size 2238 * to the tail and doing the above space check will not work. 
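		 * (For example, the reservation might have been taken at
		 * reserved_tail == size - 8; after the wrap the request
		 * finishes at, say, tail == 100, so tail is now smaller
		 * than reserved_tail and the subtraction used above would
		 * go negative.)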
2239 * Rather than attempt to track how much tail was skipped, 2240 * it is much simpler to say that also skipping the sanity 2241 * check every once in a while is not a big issue. 2242 */ 2243 } 2244 2245 ringbuf->reserved_size = 0; 2246 ringbuf->reserved_in_use = false; 2247 } 2248 2249 static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) 2250 { 2251 struct intel_ringbuffer *ringbuf = ring->buffer; 2252 int remain_usable = ringbuf->effective_size - ringbuf->tail; 2253 int remain_actual = ringbuf->size - ringbuf->tail; 2254 int ret, total_bytes, wait_bytes = 0; 2255 bool need_wrap = false; 2256 2257 if (ringbuf->reserved_in_use) 2258 total_bytes = bytes; 2259 else 2260 total_bytes = bytes + ringbuf->reserved_size; 2261 2262 if (unlikely(bytes > remain_usable)) { 2263 /* 2264 * Not enough space for the basic request. So need to flush 2265 * out the remainder and then wait for base + reserved. 2266 */ 2267 wait_bytes = remain_actual + total_bytes; 2268 need_wrap = true; 2269 } else { 2270 if (unlikely(total_bytes > remain_usable)) { 2271 /* 2272 * The base request will fit but the reserved space 2273 * falls off the end. So only need to to wait for the 2274 * reserved size after flushing out the remainder. 2275 */ 2276 wait_bytes = remain_actual + ringbuf->reserved_size; 2277 need_wrap = true; 2278 } else if (total_bytes > ringbuf->space) { 2279 /* No wrapping required, just waiting. */ 2280 wait_bytes = total_bytes; 2281 } 2282 } 2283 2284 if (wait_bytes) { 2285 ret = ring_wait_for_space(ring, wait_bytes); 2286 if (unlikely(ret)) 2287 return ret; 2288 2289 if (need_wrap) 2290 __wrap_ring_buffer(ringbuf); 2291 } 2292 2293 return 0; 2294 } 2295 2296 int intel_ring_begin(struct drm_i915_gem_request *req, 2297 int num_dwords) 2298 { 2299 struct intel_engine_cs *ring; 2300 struct drm_i915_private *dev_priv; 2301 int ret; 2302 2303 WARN_ON(req == NULL); 2304 ring = req->ring; 2305 dev_priv = ring->dev->dev_private; 2306 2307 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 2308 dev_priv->mm.interruptible); 2309 if (ret) 2310 return ret; 2311 2312 ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); 2313 if (ret) 2314 return ret; 2315 2316 ring->buffer->space -= num_dwords * sizeof(uint32_t); 2317 return 0; 2318 } 2319 2320 /* Align the ring tail to a cacheline boundary */ 2321 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2322 { 2323 struct intel_engine_cs *ring = req->ring; 2324 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2325 int ret; 2326 2327 if (num_dwords == 0) 2328 return 0; 2329 2330 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2331 ret = intel_ring_begin(req, num_dwords); 2332 if (ret) 2333 return ret; 2334 2335 while (num_dwords--) 2336 intel_ring_emit(ring, MI_NOOP); 2337 2338 intel_ring_advance(ring); 2339 2340 return 0; 2341 } 2342 2343 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) 2344 { 2345 struct drm_device *dev = ring->dev; 2346 struct drm_i915_private *dev_priv = dev->dev_private; 2347 2348 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { 2349 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 2350 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 2351 if (HAS_VEBOX(dev)) 2352 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); 2353 } 2354 2355 ring->set_seqno(ring, seqno); 2356 ring->hangcheck.seqno = seqno; 2357 } 2358 2359 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, 2360 u32 value) 2361 { 2362 struct drm_i915_private *dev_priv = 
ring->dev->dev_private; 2363 2364 /* Every tail move must follow the sequence below */ 2365 2366 /* Disable notification that the ring is IDLE. The GT 2367 * will then assume that it is busy and bring it out of rc6. 2368 */ 2369 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2370 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2371 2372 /* Clear the context id. Here be magic! */ 2373 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 2374 2375 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 2376 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 2377 GEN6_BSD_SLEEP_INDICATOR) == 0, 2378 50)) 2379 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2380 2381 /* Now that the ring is fully powered up, update the tail */ 2382 I915_WRITE_TAIL(ring, value); 2383 POSTING_READ(RING_TAIL(ring->mmio_base)); 2384 2385 /* Let the ring send IDLE messages to the GT again, 2386 * and so let it sleep to conserve power when idle. 2387 */ 2388 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2389 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2390 } 2391 2392 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2393 u32 invalidate, u32 flush) 2394 { 2395 struct intel_engine_cs *ring = req->ring; 2396 uint32_t cmd; 2397 int ret; 2398 2399 ret = intel_ring_begin(req, 4); 2400 if (ret) 2401 return ret; 2402 2403 cmd = MI_FLUSH_DW; 2404 if (INTEL_INFO(ring->dev)->gen >= 8) 2405 cmd += 1; 2406 2407 /* We always require a command barrier so that subsequent 2408 * commands, such as breadcrumb interrupts, are strictly ordered 2409 * wrt the contents of the write cache being flushed to memory 2410 * (and thus being coherent from the CPU). 2411 */ 2412 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2413 2414 /* 2415 * Bspec vol 1c.5 - video engine command streamer: 2416 * "If ENABLED, all TLBs will be invalidated once the flush 2417 * operation is complete. This bit is only valid when the 2418 * Post-Sync Operation field is a value of 1h or 3h." 2419 */ 2420 if (invalidate & I915_GEM_GPU_DOMAINS) 2421 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2422 2423 intel_ring_emit(ring, cmd); 2424 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2425 if (INTEL_INFO(ring->dev)->gen >= 8) { 2426 intel_ring_emit(ring, 0); /* upper addr */ 2427 intel_ring_emit(ring, 0); /* value */ 2428 } else { 2429 intel_ring_emit(ring, 0); 2430 intel_ring_emit(ring, MI_NOOP); 2431 } 2432 intel_ring_advance(ring); 2433 return 0; 2434 } 2435 2436 static int 2437 gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2438 u64 offset, u32 len, 2439 unsigned dispatch_flags) 2440 { 2441 struct intel_engine_cs *ring = req->ring; 2442 bool ppgtt = USES_PPGTT(ring->dev) && 2443 !(dispatch_flags & I915_DISPATCH_SECURE); 2444 int ret; 2445 2446 ret = intel_ring_begin(req, 4); 2447 if (ret) 2448 return ret; 2449 2450 /* FIXME(BDW): Address space and security selectors. */ 2451 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2452 (dispatch_flags & I915_DISPATCH_RS ? 
2453 MI_BATCH_RESOURCE_STREAMER : 0)); 2454 intel_ring_emit(ring, lower_32_bits(offset)); 2455 intel_ring_emit(ring, upper_32_bits(offset)); 2456 intel_ring_emit(ring, MI_NOOP); 2457 intel_ring_advance(ring); 2458 2459 return 0; 2460 } 2461 2462 static int 2463 hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2464 u64 offset, u32 len, 2465 unsigned dispatch_flags) 2466 { 2467 struct intel_engine_cs *ring = req->ring; 2468 int ret; 2469 2470 ret = intel_ring_begin(req, 2); 2471 if (ret) 2472 return ret; 2473 2474 intel_ring_emit(ring, 2475 MI_BATCH_BUFFER_START | 2476 (dispatch_flags & I915_DISPATCH_SECURE ? 2477 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2478 (dispatch_flags & I915_DISPATCH_RS ? 2479 MI_BATCH_RESOURCE_STREAMER : 0)); 2480 /* bit0-7 is the length on GEN6+ */ 2481 intel_ring_emit(ring, offset); 2482 intel_ring_advance(ring); 2483 2484 return 0; 2485 } 2486 2487 static int 2488 gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2489 u64 offset, u32 len, 2490 unsigned dispatch_flags) 2491 { 2492 struct intel_engine_cs *ring = req->ring; 2493 int ret; 2494 2495 ret = intel_ring_begin(req, 2); 2496 if (ret) 2497 return ret; 2498 2499 intel_ring_emit(ring, 2500 MI_BATCH_BUFFER_START | 2501 (dispatch_flags & I915_DISPATCH_SECURE ? 2502 0 : MI_BATCH_NON_SECURE_I965)); 2503 /* bit0-7 is the length on GEN6+ */ 2504 intel_ring_emit(ring, offset); 2505 intel_ring_advance(ring); 2506 2507 return 0; 2508 } 2509 2510 /* Blitter support (SandyBridge+) */ 2511 2512 static int gen6_ring_flush(struct drm_i915_gem_request *req, 2513 u32 invalidate, u32 flush) 2514 { 2515 struct intel_engine_cs *ring = req->ring; 2516 struct drm_device *dev = ring->dev; 2517 uint32_t cmd; 2518 int ret; 2519 2520 ret = intel_ring_begin(req, 4); 2521 if (ret) 2522 return ret; 2523 2524 cmd = MI_FLUSH_DW; 2525 if (INTEL_INFO(dev)->gen >= 8) 2526 cmd += 1; 2527 2528 /* We always require a command barrier so that subsequent 2529 * commands, such as breadcrumb interrupts, are strictly ordered 2530 * wrt the contents of the write cache being flushed to memory 2531 * (and thus being coherent from the CPU). 2532 */ 2533 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2534 2535 /* 2536 * Bspec vol 1c.3 - blitter engine command streamer: 2537 * "If ENABLED, all TLBs will be invalidated once the flush 2538 * operation is complete. This bit is only valid when the 2539 * Post-Sync Operation field is a value of 1h or 3h." 2540 */ 2541 if (invalidate & I915_GEM_DOMAIN_RENDER) 2542 cmd |= MI_INVALIDATE_TLB; 2543 intel_ring_emit(ring, cmd); 2544 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2545 if (INTEL_INFO(dev)->gen >= 8) { 2546 intel_ring_emit(ring, 0); /* upper addr */ 2547 intel_ring_emit(ring, 0); /* value */ 2548 } else { 2549 intel_ring_emit(ring, 0); 2550 intel_ring_emit(ring, MI_NOOP); 2551 } 2552 intel_ring_advance(ring); 2553 2554 return 0; 2555 } 2556 2557 int intel_init_render_ring_buffer(struct drm_device *dev) 2558 { 2559 struct drm_i915_private *dev_priv = dev->dev_private; 2560 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 2561 struct drm_i915_gem_object *obj; 2562 int ret; 2563 2564 ring->name = "render ring"; 2565 ring->id = RCS; 2566 ring->mmio_base = RENDER_RING_BASE; 2567 2568 if (INTEL_INFO(dev)->gen >= 8) { 2569 if (i915_semaphore_is_enabled(dev)) { 2570 obj = i915_gem_alloc_object(dev, 4096); 2571 if (obj == NULL) { 2572 DRM_ERROR("Failed to allocate semaphore bo. 
Disabling semaphores\n"); 2573 i915.semaphores = 0; 2574 } else { 2575 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2576 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); 2577 if (ret != 0) { 2578 drm_gem_object_unreference(&obj->base); 2579 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); 2580 i915.semaphores = 0; 2581 } else 2582 dev_priv->semaphore_obj = obj; 2583 } 2584 } 2585 2586 ring->init_context = intel_rcs_ctx_init; 2587 ring->add_request = gen6_add_request; 2588 ring->flush = gen8_render_ring_flush; 2589 ring->irq_get = gen8_ring_get_irq; 2590 ring->irq_put = gen8_ring_put_irq; 2591 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2592 ring->get_seqno = gen6_ring_get_seqno; 2593 ring->set_seqno = ring_set_seqno; 2594 if (i915_semaphore_is_enabled(dev)) { 2595 WARN_ON(!dev_priv->semaphore_obj); 2596 ring->semaphore.sync_to = gen8_ring_sync; 2597 ring->semaphore.signal = gen8_rcs_signal; 2598 GEN8_RING_SEMAPHORE_INIT; 2599 } 2600 } else if (INTEL_INFO(dev)->gen >= 6) { 2601 ring->add_request = gen6_add_request; 2602 ring->flush = gen7_render_ring_flush; 2603 if (INTEL_INFO(dev)->gen == 6) 2604 ring->flush = gen6_render_ring_flush; 2605 ring->irq_get = gen6_ring_get_irq; 2606 ring->irq_put = gen6_ring_put_irq; 2607 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2608 ring->get_seqno = gen6_ring_get_seqno; 2609 ring->set_seqno = ring_set_seqno; 2610 if (i915_semaphore_is_enabled(dev)) { 2611 ring->semaphore.sync_to = gen6_ring_sync; 2612 ring->semaphore.signal = gen6_signal; 2613 /* 2614 * The current semaphore is only applied on pre-gen8 2615 * platform. And there is no VCS2 ring on the pre-gen8 2616 * platform. So the semaphore between RCS and VCS2 is 2617 * initialized as INVALID. Gen8 will initialize the 2618 * sema between VCS2 and RCS later. 
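			 *
			 * Reading aid: the mbox.wait[]/mbox.signal[] tables
			 * filled in below pair up the per-ring semaphore
			 * selectors and MMIO sync registers consumed by
			 * gen6_ring_sync() and gen6_signal(); the VCS2 slots
			 * stay INVALID/NOSYNC since that ring only exists on
			 * gen8+.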
2619 */ 2620 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; 2621 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; 2622 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; 2623 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; 2624 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2625 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; 2626 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; 2627 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; 2628 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2629 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2630 } 2631 } else if (IS_GEN5(dev)) { 2632 ring->add_request = pc_render_add_request; 2633 ring->flush = gen4_render_ring_flush; 2634 ring->get_seqno = pc_render_get_seqno; 2635 ring->set_seqno = pc_render_set_seqno; 2636 ring->irq_get = gen5_ring_get_irq; 2637 ring->irq_put = gen5_ring_put_irq; 2638 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | 2639 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 2640 } else { 2641 ring->add_request = i9xx_add_request; 2642 if (INTEL_INFO(dev)->gen < 4) 2643 ring->flush = gen2_render_ring_flush; 2644 else 2645 ring->flush = gen4_render_ring_flush; 2646 ring->get_seqno = ring_get_seqno; 2647 ring->set_seqno = ring_set_seqno; 2648 if (IS_GEN2(dev)) { 2649 ring->irq_get = i8xx_ring_get_irq; 2650 ring->irq_put = i8xx_ring_put_irq; 2651 } else { 2652 ring->irq_get = i9xx_ring_get_irq; 2653 ring->irq_put = i9xx_ring_put_irq; 2654 } 2655 ring->irq_enable_mask = I915_USER_INTERRUPT; 2656 } 2657 ring->write_tail = ring_write_tail; 2658 2659 if (IS_HASWELL(dev)) 2660 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2661 else if (IS_GEN8(dev)) 2662 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2663 else if (INTEL_INFO(dev)->gen >= 6) 2664 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2665 else if (INTEL_INFO(dev)->gen >= 4) 2666 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 2667 else if (IS_I830(dev) || IS_845G(dev)) 2668 ring->dispatch_execbuffer = i830_dispatch_execbuffer; 2669 else 2670 ring->dispatch_execbuffer = i915_dispatch_execbuffer; 2671 ring->init_hw = init_render_ring; 2672 ring->cleanup = render_ring_cleanup; 2673 2674 /* Workaround batchbuffer to combat CS tlb bug. 
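	 * The scratch bo allocated below is what i830_dispatch_execbuffer()
	 * copies unpinned batches into before running them, so the command
	 * streamer never fetches through the stale TLB entries this erratum
	 * is about.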
*/ 2675 if (HAS_BROKEN_CS_TLB(dev)) { 2676 obj = i915_gem_alloc_object(dev, I830_WA_SIZE); 2677 if (obj == NULL) { 2678 DRM_ERROR("Failed to allocate batch bo\n"); 2679 return -ENOMEM; 2680 } 2681 2682 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 2683 if (ret != 0) { 2684 drm_gem_object_unreference(&obj->base); 2685 DRM_ERROR("Failed to ping batch bo\n"); 2686 return ret; 2687 } 2688 2689 ring->scratch.obj = obj; 2690 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 2691 } 2692 2693 ret = intel_init_ring_buffer(dev, ring); 2694 if (ret) 2695 return ret; 2696 2697 if (INTEL_INFO(dev)->gen >= 5) { 2698 ret = intel_init_pipe_control(ring); 2699 if (ret) 2700 return ret; 2701 } 2702 2703 return 0; 2704 } 2705 2706 int intel_init_bsd_ring_buffer(struct drm_device *dev) 2707 { 2708 struct drm_i915_private *dev_priv = dev->dev_private; 2709 struct intel_engine_cs *ring = &dev_priv->ring[VCS]; 2710 2711 ring->name = "bsd ring"; 2712 ring->id = VCS; 2713 2714 ring->write_tail = ring_write_tail; 2715 if (INTEL_INFO(dev)->gen >= 6) { 2716 ring->mmio_base = GEN6_BSD_RING_BASE; 2717 /* gen6 bsd needs a special wa for tail updates */ 2718 if (IS_GEN6(dev)) 2719 ring->write_tail = gen6_bsd_ring_write_tail; 2720 ring->flush = gen6_bsd_ring_flush; 2721 ring->add_request = gen6_add_request; 2722 ring->get_seqno = gen6_ring_get_seqno; 2723 ring->set_seqno = ring_set_seqno; 2724 if (INTEL_INFO(dev)->gen >= 8) { 2725 ring->irq_enable_mask = 2726 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2727 ring->irq_get = gen8_ring_get_irq; 2728 ring->irq_put = gen8_ring_put_irq; 2729 ring->dispatch_execbuffer = 2730 gen8_ring_dispatch_execbuffer; 2731 if (i915_semaphore_is_enabled(dev)) { 2732 ring->semaphore.sync_to = gen8_ring_sync; 2733 ring->semaphore.signal = gen8_xcs_signal; 2734 GEN8_RING_SEMAPHORE_INIT; 2735 } 2736 } else { 2737 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2738 ring->irq_get = gen6_ring_get_irq; 2739 ring->irq_put = gen6_ring_put_irq; 2740 ring->dispatch_execbuffer = 2741 gen6_ring_dispatch_execbuffer; 2742 if (i915_semaphore_is_enabled(dev)) { 2743 ring->semaphore.sync_to = gen6_ring_sync; 2744 ring->semaphore.signal = gen6_signal; 2745 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; 2746 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; 2747 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; 2748 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; 2749 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2750 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; 2751 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; 2752 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; 2753 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; 2754 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2755 } 2756 } 2757 } else { 2758 ring->mmio_base = BSD_RING_BASE; 2759 ring->flush = bsd_ring_flush; 2760 ring->add_request = i9xx_add_request; 2761 ring->get_seqno = ring_get_seqno; 2762 ring->set_seqno = ring_set_seqno; 2763 if (IS_GEN5(dev)) { 2764 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2765 ring->irq_get = gen5_ring_get_irq; 2766 ring->irq_put = gen5_ring_put_irq; 2767 } else { 2768 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2769 ring->irq_get = i9xx_ring_get_irq; 2770 ring->irq_put = i9xx_ring_put_irq; 2771 } 2772 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 2773 } 2774 ring->init_hw = init_ring_common; 2775 2776 return intel_init_ring_buffer(dev, ring); 2777 } 2778 2779 /** 2780 * Initialize the second BSD ring (eg. 
Broadwell GT3, Skylake GT3) 2781 */ 2782 int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2783 { 2784 struct drm_i915_private *dev_priv = dev->dev_private; 2785 struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; 2786 2787 ring->name = "bsd2 ring"; 2788 ring->id = VCS2; 2789 2790 ring->write_tail = ring_write_tail; 2791 ring->mmio_base = GEN8_BSD2_RING_BASE; 2792 ring->flush = gen6_bsd_ring_flush; 2793 ring->add_request = gen6_add_request; 2794 ring->get_seqno = gen6_ring_get_seqno; 2795 ring->set_seqno = ring_set_seqno; 2796 ring->irq_enable_mask = 2797 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 2798 ring->irq_get = gen8_ring_get_irq; 2799 ring->irq_put = gen8_ring_put_irq; 2800 ring->dispatch_execbuffer = 2801 gen8_ring_dispatch_execbuffer; 2802 if (i915_semaphore_is_enabled(dev)) { 2803 ring->semaphore.sync_to = gen8_ring_sync; 2804 ring->semaphore.signal = gen8_xcs_signal; 2805 GEN8_RING_SEMAPHORE_INIT; 2806 } 2807 ring->init_hw = init_ring_common; 2808 2809 return intel_init_ring_buffer(dev, ring); 2810 } 2811 2812 int intel_init_blt_ring_buffer(struct drm_device *dev) 2813 { 2814 struct drm_i915_private *dev_priv = dev->dev_private; 2815 struct intel_engine_cs *ring = &dev_priv->ring[BCS]; 2816 2817 ring->name = "blitter ring"; 2818 ring->id = BCS; 2819 2820 ring->mmio_base = BLT_RING_BASE; 2821 ring->write_tail = ring_write_tail; 2822 ring->flush = gen6_ring_flush; 2823 ring->add_request = gen6_add_request; 2824 ring->get_seqno = gen6_ring_get_seqno; 2825 ring->set_seqno = ring_set_seqno; 2826 if (INTEL_INFO(dev)->gen >= 8) { 2827 ring->irq_enable_mask = 2828 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2829 ring->irq_get = gen8_ring_get_irq; 2830 ring->irq_put = gen8_ring_put_irq; 2831 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2832 if (i915_semaphore_is_enabled(dev)) { 2833 ring->semaphore.sync_to = gen8_ring_sync; 2834 ring->semaphore.signal = gen8_xcs_signal; 2835 GEN8_RING_SEMAPHORE_INIT; 2836 } 2837 } else { 2838 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2839 ring->irq_get = gen6_ring_get_irq; 2840 ring->irq_put = gen6_ring_put_irq; 2841 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2842 if (i915_semaphore_is_enabled(dev)) { 2843 ring->semaphore.signal = gen6_signal; 2844 ring->semaphore.sync_to = gen6_ring_sync; 2845 /* 2846 * The current semaphore is only applied on pre-gen8 2847 * platform. And there is no VCS2 ring on the pre-gen8 2848 * platform. So the semaphore between BCS and VCS2 is 2849 * initialized as INVALID. Gen8 will initialize the 2850 * sema between BCS and VCS2 later. 
2851 */ 2852 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; 2853 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; 2854 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; 2855 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; 2856 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2857 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; 2858 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; 2859 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; 2860 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; 2861 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2862 } 2863 } 2864 ring->init_hw = init_ring_common; 2865 2866 return intel_init_ring_buffer(dev, ring); 2867 } 2868 2869 int intel_init_vebox_ring_buffer(struct drm_device *dev) 2870 { 2871 struct drm_i915_private *dev_priv = dev->dev_private; 2872 struct intel_engine_cs *ring = &dev_priv->ring[VECS]; 2873 2874 ring->name = "video enhancement ring"; 2875 ring->id = VECS; 2876 2877 ring->mmio_base = VEBOX_RING_BASE; 2878 ring->write_tail = ring_write_tail; 2879 ring->flush = gen6_ring_flush; 2880 ring->add_request = gen6_add_request; 2881 ring->get_seqno = gen6_ring_get_seqno; 2882 ring->set_seqno = ring_set_seqno; 2883 2884 if (INTEL_INFO(dev)->gen >= 8) { 2885 ring->irq_enable_mask = 2886 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 2887 ring->irq_get = gen8_ring_get_irq; 2888 ring->irq_put = gen8_ring_put_irq; 2889 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2890 if (i915_semaphore_is_enabled(dev)) { 2891 ring->semaphore.sync_to = gen8_ring_sync; 2892 ring->semaphore.signal = gen8_xcs_signal; 2893 GEN8_RING_SEMAPHORE_INIT; 2894 } 2895 } else { 2896 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2897 ring->irq_get = hsw_vebox_get_irq; 2898 ring->irq_put = hsw_vebox_put_irq; 2899 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2900 if (i915_semaphore_is_enabled(dev)) { 2901 ring->semaphore.sync_to = gen6_ring_sync; 2902 ring->semaphore.signal = gen6_signal; 2903 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; 2904 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; 2905 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; 2906 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; 2907 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2908 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; 2909 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; 2910 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; 2911 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; 2912 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2913 } 2914 } 2915 ring->init_hw = init_ring_common; 2916 2917 return intel_init_ring_buffer(dev, ring); 2918 } 2919 2920 int 2921 intel_ring_flush_all_caches(struct drm_i915_gem_request *req) 2922 { 2923 struct intel_engine_cs *ring = req->ring; 2924 int ret; 2925 2926 if (!ring->gpu_caches_dirty) 2927 return 0; 2928 2929 ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS); 2930 if (ret) 2931 return ret; 2932 2933 trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); 2934 2935 ring->gpu_caches_dirty = false; 2936 return 0; 2937 } 2938 2939 int 2940 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 2941 { 2942 struct intel_engine_cs *ring = req->ring; 2943 uint32_t flush_domains; 2944 int ret; 2945 2946 flush_domains = 0; 2947 if (ring->gpu_caches_dirty) 2948 flush_domains = I915_GEM_GPU_DOMAINS; 2949 2950 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 2951 if (ret) 2952 return ret; 2953 2954 trace_i915_gem_ring_flush(req, 
I915_GEM_GPU_DOMAINS, flush_domains); 2955 2956 ring->gpu_caches_dirty = false; 2957 return 0; 2958 } 2959 2960 void 2961 intel_stop_ring_buffer(struct intel_engine_cs *ring) 2962 { 2963 int ret; 2964 2965 if (!intel_ring_initialized(ring)) 2966 return; 2967 2968 ret = intel_ring_idle(ring); 2969 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) 2970 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 2971 ring->name, ret); 2972 2973 stop_ring(ring); 2974 } 2975
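/*
 * Worked example (illustrative only, assuming CACHELINE_BYTES == 64):
 * if intel_ring_cacheline_align() finds tail & 63 == 24, that is 24/4 == 6
 * dwords past the last cacheline boundary, so it pads with 64/4 - 6 == 10
 * MI_NOOPs and the following command starts on a fresh cacheline.
 */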