/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

bool intel_engine_stopped(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}

static void __intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_engine_stopped(engine))
		return;
	engine->write_tail(engine, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if
MI_NO_WRITE_FLUSH is unset. On 965, it is 117 * also flushed at 2d versus 3d pipeline switches. 118 * 119 * read-only caches: 120 * 121 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 122 * MI_READ_FLUSH is set, and is always flushed on 965. 123 * 124 * I915_GEM_DOMAIN_COMMAND may not exist? 125 * 126 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 127 * invalidated when MI_EXE_FLUSH is set. 128 * 129 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 130 * invalidated with every MI_FLUSH. 131 * 132 * TLBs: 133 * 134 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 135 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 136 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 137 * are flushed at any MI_FLUSH. 138 */ 139 140 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 141 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) 142 cmd &= ~MI_NO_WRITE_FLUSH; 143 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 144 cmd |= MI_EXE_FLUSH; 145 146 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 147 (IS_G4X(req->i915) || IS_GEN5(req->i915))) 148 cmd |= MI_INVALIDATE_ISP; 149 150 ret = intel_ring_begin(req, 2); 151 if (ret) 152 return ret; 153 154 intel_ring_emit(engine, cmd); 155 intel_ring_emit(engine, MI_NOOP); 156 intel_ring_advance(engine); 157 158 return 0; 159 } 160 161 /** 162 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 163 * implementing two workarounds on gen6. From section 1.4.7.1 164 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: 165 * 166 * [DevSNB-C+{W/A}] Before any depth stall flush (including those 167 * produced by non-pipelined state commands), software needs to first 168 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 169 * 0. 170 * 171 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable 172 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. 173 * 174 * And the workaround for these two requires this workaround first: 175 * 176 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent 177 * BEFORE the pipe-control with a post-sync op and no write-cache 178 * flushes. 179 * 180 * And this last workaround is tricky because of the requirements on 181 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM 182 * volume 2 part 1: 183 * 184 * "1 of the following must also be set: 185 * - Render Target Cache Flush Enable ([12] of DW1) 186 * - Depth Cache Flush Enable ([0] of DW1) 187 * - Stall at Pixel Scoreboard ([1] of DW1) 188 * - Depth Stall ([13] of DW1) 189 * - Post-Sync Operation ([13] of DW1) 190 * - Notify Enable ([8] of DW1)" 191 * 192 * The cache flushes require the workaround flush that triggered this 193 * one, so we can't use it. Depth stall would trigger the same. 194 * Post-sync nonzero is what triggered this second workaround, so we 195 * can't use that one either. Notify enable is IRQs, which aren't 196 * really our business. That leaves only stall at scoreboard. 
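 *
 * The function below therefore emits two PIPE_CONTROLs back to back: first
 * one with CS_STALL | STALL_AT_SCOREBOARD and no write, then a second one
 * performing a post-sync QW write to a scratch address, which together
 * satisfy the requirements above.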
197 */ 198 static int 199 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) 200 { 201 struct intel_engine_cs *engine = req->engine; 202 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 203 int ret; 204 205 ret = intel_ring_begin(req, 6); 206 if (ret) 207 return ret; 208 209 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); 210 intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | 211 PIPE_CONTROL_STALL_AT_SCOREBOARD); 212 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 213 intel_ring_emit(engine, 0); /* low dword */ 214 intel_ring_emit(engine, 0); /* high dword */ 215 intel_ring_emit(engine, MI_NOOP); 216 intel_ring_advance(engine); 217 218 ret = intel_ring_begin(req, 6); 219 if (ret) 220 return ret; 221 222 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); 223 intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE); 224 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 225 intel_ring_emit(engine, 0); 226 intel_ring_emit(engine, 0); 227 intel_ring_emit(engine, MI_NOOP); 228 intel_ring_advance(engine); 229 230 return 0; 231 } 232 233 static int 234 gen6_render_ring_flush(struct drm_i915_gem_request *req, 235 u32 invalidate_domains, u32 flush_domains) 236 { 237 struct intel_engine_cs *engine = req->engine; 238 u32 flags = 0; 239 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 240 int ret; 241 242 /* Force SNB workarounds for PIPE_CONTROL flushes */ 243 ret = intel_emit_post_sync_nonzero_flush(req); 244 if (ret) 245 return ret; 246 247 /* Just flush everything. Experiments have shown that reducing the 248 * number of bits based on the write domains has little performance 249 * impact. 250 */ 251 if (flush_domains) { 252 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 253 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 254 /* 255 * Ensure that any following seqno writes only happen 256 * when the render cache is indeed flushed. 257 */ 258 flags |= PIPE_CONTROL_CS_STALL; 259 } 260 if (invalidate_domains) { 261 flags |= PIPE_CONTROL_TLB_INVALIDATE; 262 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 263 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 264 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 265 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 266 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 267 /* 268 * TLB invalidate requires a post-sync write. 
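		 * We use a QW write to the per-engine scratch page for that,
		 * together with a CS stall (see the flags set just below).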
269 */ 270 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 271 } 272 273 ret = intel_ring_begin(req, 4); 274 if (ret) 275 return ret; 276 277 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); 278 intel_ring_emit(engine, flags); 279 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 280 intel_ring_emit(engine, 0); 281 intel_ring_advance(engine); 282 283 return 0; 284 } 285 286 static int 287 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) 288 { 289 struct intel_engine_cs *engine = req->engine; 290 int ret; 291 292 ret = intel_ring_begin(req, 4); 293 if (ret) 294 return ret; 295 296 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); 297 intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | 298 PIPE_CONTROL_STALL_AT_SCOREBOARD); 299 intel_ring_emit(engine, 0); 300 intel_ring_emit(engine, 0); 301 intel_ring_advance(engine); 302 303 return 0; 304 } 305 306 static int 307 gen7_render_ring_flush(struct drm_i915_gem_request *req, 308 u32 invalidate_domains, u32 flush_domains) 309 { 310 struct intel_engine_cs *engine = req->engine; 311 u32 flags = 0; 312 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 313 int ret; 314 315 /* 316 * Ensure that any following seqno writes only happen when the render 317 * cache is indeed flushed. 318 * 319 * Workaround: 4th PIPE_CONTROL command (except the ones with only 320 * read-cache invalidate bits set) must have the CS_STALL bit set. We 321 * don't try to be clever and just set it unconditionally. 322 */ 323 flags |= PIPE_CONTROL_CS_STALL; 324 325 /* Just flush everything. Experiments have shown that reducing the 326 * number of bits based on the write domains has little performance 327 * impact. 328 */ 329 if (flush_domains) { 330 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 331 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 332 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 333 flags |= PIPE_CONTROL_FLUSH_ENABLE; 334 } 335 if (invalidate_domains) { 336 flags |= PIPE_CONTROL_TLB_INVALIDATE; 337 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 338 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 339 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 340 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 341 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 342 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; 343 /* 344 * TLB invalidate requires a post-sync write. 345 */ 346 flags |= PIPE_CONTROL_QW_WRITE; 347 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 348 349 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; 350 351 /* Workaround: we must issue a pipe_control with CS-stall bit 352 * set before a pipe_control command that has the state cache 353 * invalidate bit set. 
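		 * That preparatory pipe_control is emitted by
		 * gen7_render_ring_cs_stall_wa() on the next line.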
*/ 354 gen7_render_ring_cs_stall_wa(req); 355 } 356 357 ret = intel_ring_begin(req, 4); 358 if (ret) 359 return ret; 360 361 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); 362 intel_ring_emit(engine, flags); 363 intel_ring_emit(engine, scratch_addr); 364 intel_ring_emit(engine, 0); 365 intel_ring_advance(engine); 366 367 return 0; 368 } 369 370 static int 371 gen8_emit_pipe_control(struct drm_i915_gem_request *req, 372 u32 flags, u32 scratch_addr) 373 { 374 struct intel_engine_cs *engine = req->engine; 375 int ret; 376 377 ret = intel_ring_begin(req, 6); 378 if (ret) 379 return ret; 380 381 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); 382 intel_ring_emit(engine, flags); 383 intel_ring_emit(engine, scratch_addr); 384 intel_ring_emit(engine, 0); 385 intel_ring_emit(engine, 0); 386 intel_ring_emit(engine, 0); 387 intel_ring_advance(engine); 388 389 return 0; 390 } 391 392 static int 393 gen8_render_ring_flush(struct drm_i915_gem_request *req, 394 u32 invalidate_domains, u32 flush_domains) 395 { 396 u32 flags = 0; 397 u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 398 int ret; 399 400 flags |= PIPE_CONTROL_CS_STALL; 401 402 if (flush_domains) { 403 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 404 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 405 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 406 flags |= PIPE_CONTROL_FLUSH_ENABLE; 407 } 408 if (invalidate_domains) { 409 flags |= PIPE_CONTROL_TLB_INVALIDATE; 410 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 411 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 412 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 413 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 414 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 415 flags |= PIPE_CONTROL_QW_WRITE; 416 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 417 418 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ 419 ret = gen8_emit_pipe_control(req, 420 PIPE_CONTROL_CS_STALL | 421 PIPE_CONTROL_STALL_AT_SCOREBOARD, 422 0); 423 if (ret) 424 return ret; 425 } 426 427 return gen8_emit_pipe_control(req, flags, scratch_addr); 428 } 429 430 static void ring_write_tail(struct intel_engine_cs *engine, 431 u32 value) 432 { 433 struct drm_i915_private *dev_priv = engine->i915; 434 I915_WRITE_TAIL(engine, value); 435 } 436 437 u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 438 { 439 struct drm_i915_private *dev_priv = engine->i915; 440 u64 acthd; 441 442 if (INTEL_GEN(dev_priv) >= 8) 443 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 444 RING_ACTHD_UDW(engine->mmio_base)); 445 else if (INTEL_GEN(dev_priv) >= 4) 446 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 447 else 448 acthd = I915_READ(ACTHD); 449 450 return acthd; 451 } 452 453 static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 454 { 455 struct drm_i915_private *dev_priv = engine->i915; 456 u32 addr; 457 458 addr = dev_priv->status_page_dmah->busaddr; 459 if (INTEL_GEN(dev_priv) >= 4) 460 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 461 I915_WRITE(HWS_PGA, addr); 462 } 463 464 static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 465 { 466 struct drm_i915_private *dev_priv = engine->i915; 467 i915_reg_t mmio; 468 469 /* The ring status page addresses are no longer next to the rest of 470 * the ring registers as of gen7. 471 */ 472 if (IS_GEN7(dev_priv)) { 473 switch (engine->id) { 474 case RCS: 475 mmio = RENDER_HWS_PGA_GEN7; 476 break; 477 case BCS: 478 mmio = BLT_HWS_PGA_GEN7; 479 break; 480 /* 481 * VCS2 actually doesn't exist on Gen7. 
		 * The case below exists only to silence gcc's switch
		 * completeness warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN2(dev_priv)) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	engine->write_tail(engine, 0);

	if (!IS_GEN2(dev_priv)) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ringbuffer *ringbuf = engine->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev_priv))
		intel_ring_setup_status_page(engine);
	else
		ring_setup_phys_status_page(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring.
This must happen _after_ we've cleared the ring 606 * registers with the above sequence (the readback of the HEAD registers 607 * also enforces ordering), otherwise the hw might lose the new ring 608 * register values. */ 609 I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj)); 610 611 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 612 if (I915_READ_HEAD(engine)) 613 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n", 614 engine->name, I915_READ_HEAD(engine)); 615 I915_WRITE_HEAD(engine, 0); 616 (void)I915_READ_HEAD(engine); 617 618 I915_WRITE_CTL(engine, 619 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 620 | RING_VALID); 621 622 /* If the head is still not zero, the ring is dead */ 623 if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 && 624 I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) && 625 (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) { 626 DRM_ERROR("%s initialization failed " 627 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", 628 engine->name, 629 I915_READ_CTL(engine), 630 I915_READ_CTL(engine) & RING_VALID, 631 I915_READ_HEAD(engine), I915_READ_TAIL(engine), 632 I915_READ_START(engine), 633 (unsigned long)i915_gem_obj_ggtt_offset(obj)); 634 ret = -EIO; 635 goto out; 636 } 637 638 ringbuf->last_retired_head = -1; 639 ringbuf->head = I915_READ_HEAD(engine); 640 ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR; 641 intel_ring_update_space(ringbuf); 642 643 intel_engine_init_hangcheck(engine); 644 645 out: 646 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 647 648 return ret; 649 } 650 651 void 652 intel_fini_pipe_control(struct intel_engine_cs *engine) 653 { 654 if (engine->scratch.obj == NULL) 655 return; 656 657 if (INTEL_GEN(engine->i915) >= 5) { 658 kunmap(sg_page(engine->scratch.obj->pages->sgl)); 659 i915_gem_object_ggtt_unpin(engine->scratch.obj); 660 } 661 662 drm_gem_object_unreference(&engine->scratch.obj->base); 663 engine->scratch.obj = NULL; 664 } 665 666 int 667 intel_init_pipe_control(struct intel_engine_cs *engine) 668 { 669 int ret; 670 671 WARN_ON(engine->scratch.obj); 672 673 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); 674 if (IS_ERR(engine->scratch.obj)) { 675 DRM_ERROR("Failed to allocate seqno page\n"); 676 ret = PTR_ERR(engine->scratch.obj); 677 engine->scratch.obj = NULL; 678 goto err; 679 } 680 681 ret = i915_gem_object_set_cache_level(engine->scratch.obj, 682 I915_CACHE_LLC); 683 if (ret) 684 goto err_unref; 685 686 ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); 687 if (ret) 688 goto err_unref; 689 690 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); 691 engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); 692 if (engine->scratch.cpu_page == NULL) { 693 ret = -ENOMEM; 694 goto err_unpin; 695 } 696 697 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 698 engine->name, engine->scratch.gtt_offset); 699 return 0; 700 701 err_unpin: 702 i915_gem_object_ggtt_unpin(engine->scratch.obj); 703 err_unref: 704 drm_gem_object_unreference(&engine->scratch.obj->base); 705 err: 706 return ret; 707 } 708 709 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 710 { 711 struct intel_engine_cs *engine = req->engine; 712 struct i915_workarounds *w = &req->i915->workarounds; 713 int ret, i; 714 715 if (w->count == 0) 716 return 0; 717 718 engine->gpu_caches_dirty = true; 719 ret = intel_ring_flush_all_caches(req); 720 if (ret) 721 return ret; 722 723 ret = intel_ring_begin(req, (w->count * 2 + 2)); 724 if 
(ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(engine, w->reg[i].addr);
		intel_ring_emit(engine, w->reg[i].value);
	}
	intel_ring_emit(engine, MI_NOOP);

	intel_ring_advance(engine);

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
832 */ 833 /* WaForceEnableNonCoherent:bdw,chv */ 834 /* WaHdcDisableFetchWhenMasked:bdw,chv */ 835 WA_SET_BIT_MASKED(HDC_CHICKEN0, 836 HDC_DONOT_FETCH_MEM_WHEN_MASKED | 837 HDC_FORCE_NON_COHERENT); 838 839 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: 840 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping 841 * polygons in the same 8x4 pixel/sample area to be processed without 842 * stalling waiting for the earlier ones to write to Hierarchical Z 843 * buffer." 844 * 845 * This optimization is off by default for BDW and CHV; turn it on. 846 */ 847 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); 848 849 /* Wa4x4STCOptimizationDisable:bdw,chv */ 850 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); 851 852 /* 853 * BSpec recommends 8x4 when MSAA is used, 854 * however in practice 16x4 seems fastest. 855 * 856 * Note that PS/WM thread counts depend on the WIZ hashing 857 * disable bit, which we don't touch here, but it's good 858 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 859 */ 860 WA_SET_FIELD_MASKED(GEN7_GT_MODE, 861 GEN6_WIZ_HASHING_MASK, 862 GEN6_WIZ_HASHING_16x4); 863 864 return 0; 865 } 866 867 static int bdw_init_workarounds(struct intel_engine_cs *engine) 868 { 869 struct drm_i915_private *dev_priv = engine->i915; 870 int ret; 871 872 ret = gen8_init_workarounds(engine); 873 if (ret) 874 return ret; 875 876 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ 877 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); 878 879 /* WaDisableDopClockGating:bdw */ 880 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, 881 DOP_CLOCK_GATING_DISABLE); 882 883 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 884 GEN8_SAMPLER_POWER_BYPASS_DIS); 885 886 WA_SET_BIT_MASKED(HDC_CHICKEN0, 887 /* WaForceContextSaveRestoreNonCoherent:bdw */ 888 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 889 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 890 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 891 892 return 0; 893 } 894 895 static int chv_init_workarounds(struct intel_engine_cs *engine) 896 { 897 struct drm_i915_private *dev_priv = engine->i915; 898 int ret; 899 900 ret = gen8_init_workarounds(engine); 901 if (ret) 902 return ret; 903 904 /* WaDisableThreadStallDopClockGating:chv */ 905 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); 906 907 /* Improve HiZ throughput on CHV. 
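	 * (per the CHV_HZ_8X8_MODE_IN_1X bit name, this forces the 8x8 HiZ
	 * mode while running at 1x sampling)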
*/ 908 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); 909 910 return 0; 911 } 912 913 static int gen9_init_workarounds(struct intel_engine_cs *engine) 914 { 915 struct drm_i915_private *dev_priv = engine->i915; 916 int ret; 917 918 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ 919 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); 920 921 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */ 922 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 923 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 924 925 /* WaDisableKillLogic:bxt,skl,kbl */ 926 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 927 ECOCHK_DIS_TLB); 928 929 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */ 930 /* WaDisablePartialInstShootdown:skl,bxt,kbl */ 931 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 932 FLOW_CONTROL_ENABLE | 933 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 934 935 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ 936 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 937 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 938 939 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ 940 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || 941 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 942 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 943 GEN9_DG_MIRROR_FIX_ENABLE); 944 945 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 946 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || 947 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 948 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 949 GEN9_RHWO_OPTIMIZATION_DISABLE); 950 /* 951 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set 952 * but we do that in per ctx batchbuffer as there is an issue 953 * with this register not getting restored on ctx restore 954 */ 955 } 956 957 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */ 958 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ 959 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 960 GEN9_ENABLE_YV12_BUGFIX | 961 GEN9_ENABLE_GPGPU_PREEMPTION); 962 963 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */ 964 /* WaDisablePartialResolveInVc:skl,bxt,kbl */ 965 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | 966 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); 967 968 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */ 969 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 970 GEN9_CCS_TLB_PREFETCH_ENABLE); 971 972 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 973 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || 974 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 975 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 976 PIXEL_MASK_CAMMING_DISABLE); 977 978 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ 979 WA_SET_BIT_MASKED(HDC_CHICKEN0, 980 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 981 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); 982 983 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are 984 * both tied to WaForceContextSaveRestoreNonCoherent 985 * in some hsds for skl. We keep the tie for all gen9. The 986 * documentation is a bit hazy and so we want to get common behaviour, 987 * even though there is no clear evidence we would need both on kbl/bxt. 988 * This area has been source of system hangs so we play it safe 989 * and mimic the skl regardless of what bspec says. 990 * 991 * Use Force Non-Coherent whenever executing a 3D context. This 992 * is a workaround for a possible hang in the unlikely event 993 * a TLB invalidation occurs during a PSD flush. 
994 */ 995 996 /* WaForceEnableNonCoherent:skl,bxt,kbl */ 997 WA_SET_BIT_MASKED(HDC_CHICKEN0, 998 HDC_FORCE_NON_COHERENT); 999 1000 /* WaDisableHDCInvalidation:skl,bxt,kbl */ 1001 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 1002 BDW_DISABLE_HDC_INVALIDATION); 1003 1004 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ 1005 if (IS_SKYLAKE(dev_priv) || 1006 IS_KABYLAKE(dev_priv) || 1007 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 1008 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 1009 GEN8_SAMPLER_POWER_BYPASS_DIS); 1010 1011 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */ 1012 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 1013 1014 /* WaOCLCoherentLineFlush:skl,bxt,kbl */ 1015 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | 1016 GEN8_LQSC_FLUSH_COHERENT_LINES)); 1017 1018 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */ 1019 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); 1020 if (ret) 1021 return ret; 1022 1023 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ 1024 ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); 1025 if (ret) 1026 return ret; 1027 1028 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */ 1029 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); 1030 if (ret) 1031 return ret; 1032 1033 return 0; 1034 } 1035 1036 static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 1037 { 1038 struct drm_i915_private *dev_priv = engine->i915; 1039 u8 vals[3] = { 0, 0, 0 }; 1040 unsigned int i; 1041 1042 for (i = 0; i < 3; i++) { 1043 u8 ss; 1044 1045 /* 1046 * Only consider slices where one, and only one, subslice has 7 1047 * EUs 1048 */ 1049 if (!is_power_of_2(dev_priv->info.subslice_7eu[i])) 1050 continue; 1051 1052 /* 1053 * subslice_7eu[i] != 0 (because of the check above) and 1054 * ss_max == 4 (maximum number of subslices possible per slice) 1055 * 1056 * -> 0 <= ss <= 3; 1057 */ 1058 ss = ffs(dev_priv->info.subslice_7eu[i]) - 1; 1059 vals[i] = 3 - ss; 1060 } 1061 1062 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) 1063 return 0; 1064 1065 /* Tune IZ hashing. See intel_device_info_runtime_init() */ 1066 WA_SET_FIELD_MASKED(GEN7_GT_MODE, 1067 GEN9_IZ_HASHING_MASK(2) | 1068 GEN9_IZ_HASHING_MASK(1) | 1069 GEN9_IZ_HASHING_MASK(0), 1070 GEN9_IZ_HASHING(2, vals[2]) | 1071 GEN9_IZ_HASHING(1, vals[1]) | 1072 GEN9_IZ_HASHING(0, vals[0])); 1073 1074 return 0; 1075 } 1076 1077 static int skl_init_workarounds(struct intel_engine_cs *engine) 1078 { 1079 struct drm_i915_private *dev_priv = engine->i915; 1080 int ret; 1081 1082 ret = gen9_init_workarounds(engine); 1083 if (ret) 1084 return ret; 1085 1086 /* 1087 * Actual WA is to disable percontext preemption granularity control 1088 * until D0 which is the default case so this is equivalent to 1089 * !WaDisablePerCtxtPreemptionGranularityControl:skl 1090 */ 1091 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { 1092 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 1093 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 1094 } 1095 1096 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) { 1097 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 1098 I915_WRITE(FF_SLICE_CS_CHICKEN2, 1099 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 1100 } 1101 1102 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1103 * involving this register should also be added to WA batch as required. 
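	 * (see the WaDisableLSQCROPERFforOCL write just below)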
1104 */ 1105 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) 1106 /* WaDisableLSQCROPERFforOCL:skl */ 1107 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1108 GEN8_LQSC_RO_PERF_DIS); 1109 1110 /* WaEnableGapsTsvCreditFix:skl */ 1111 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { 1112 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1113 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1114 } 1115 1116 /* WaDisablePowerCompilerClockGating:skl */ 1117 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) 1118 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1119 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1120 1121 /* WaBarrierPerformanceFixDisable:skl */ 1122 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) 1123 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1124 HDC_FENCE_DEST_SLM_DISABLE | 1125 HDC_BARRIER_PERFORMANCE_DISABLE); 1126 1127 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1128 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) 1129 WA_SET_BIT_MASKED( 1130 GEN7_HALF_SLICE_CHICKEN1, 1131 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1132 1133 /* WaDisableGafsUnitClkGating:skl */ 1134 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1135 1136 /* WaDisableLSQCROPERFforOCL:skl */ 1137 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1138 if (ret) 1139 return ret; 1140 1141 return skl_tune_iz_hashing(engine); 1142 } 1143 1144 static int bxt_init_workarounds(struct intel_engine_cs *engine) 1145 { 1146 struct drm_i915_private *dev_priv = engine->i915; 1147 int ret; 1148 1149 ret = gen9_init_workarounds(engine); 1150 if (ret) 1151 return ret; 1152 1153 /* WaStoreMultiplePTEenable:bxt */ 1154 /* This is a requirement according to Hardware specification */ 1155 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 1156 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1157 1158 /* WaSetClckGatingDisableMedia:bxt */ 1159 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 1160 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1161 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1162 } 1163 1164 /* WaDisableThreadStallDopClockGating:bxt */ 1165 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 1166 STALL_DOP_GATING_DISABLE); 1167 1168 /* WaDisablePooledEuLoadBalancingFix:bxt */ 1169 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { 1170 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, 1171 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); 1172 } 1173 1174 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1175 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { 1176 WA_SET_BIT_MASKED( 1177 GEN7_HALF_SLICE_CHICKEN1, 1178 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1179 } 1180 1181 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */ 1182 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ 1183 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1184 /* WaDisableLSQCROPERFforOCL:bxt */ 1185 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { 1186 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); 1187 if (ret) 1188 return ret; 1189 1190 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1191 if (ret) 1192 return ret; 1193 } 1194 1195 /* WaProgramL3SqcReg1DefaultForPerf:bxt */ 1196 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) 1197 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1198 L3_HIGH_PRIO_CREDITS(2)); 1199 1200 /* WaInsertDummyPushConstPs:bxt */ 1201 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 1202 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1203 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1204 1205 return 0; 1206 } 1207 1208 static int kbl_init_workarounds(struct intel_engine_cs *engine) 
1209 { 1210 struct drm_i915_private *dev_priv = engine->i915; 1211 int ret; 1212 1213 ret = gen9_init_workarounds(engine); 1214 if (ret) 1215 return ret; 1216 1217 /* WaEnableGapsTsvCreditFix:kbl */ 1218 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1219 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1220 1221 /* WaDisableDynamicCreditSharing:kbl */ 1222 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 1223 WA_SET_BIT(GAMT_CHKN_BIT_REG, 1224 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 1225 1226 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ 1227 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) 1228 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1229 HDC_FENCE_DEST_SLM_DISABLE); 1230 1231 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1232 * involving this register should also be added to WA batch as required. 1233 */ 1234 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 1235 /* WaDisableLSQCROPERFforOCL:kbl */ 1236 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1237 GEN8_LQSC_RO_PERF_DIS); 1238 1239 /* WaInsertDummyPushConstPs:kbl */ 1240 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 1241 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1242 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1243 1244 /* WaDisableGafsUnitClkGating:kbl */ 1245 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1246 1247 /* WaDisableSbeCacheDispatchPortSharing:kbl */ 1248 WA_SET_BIT_MASKED( 1249 GEN7_HALF_SLICE_CHICKEN1, 1250 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1251 1252 /* WaDisableLSQCROPERFforOCL:kbl */ 1253 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1254 if (ret) 1255 return ret; 1256 1257 return 0; 1258 } 1259 1260 int init_workarounds_ring(struct intel_engine_cs *engine) 1261 { 1262 struct drm_i915_private *dev_priv = engine->i915; 1263 1264 WARN_ON(engine->id != RCS); 1265 1266 dev_priv->workarounds.count = 0; 1267 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1268 1269 if (IS_BROADWELL(dev_priv)) 1270 return bdw_init_workarounds(engine); 1271 1272 if (IS_CHERRYVIEW(dev_priv)) 1273 return chv_init_workarounds(engine); 1274 1275 if (IS_SKYLAKE(dev_priv)) 1276 return skl_init_workarounds(engine); 1277 1278 if (IS_BROXTON(dev_priv)) 1279 return bxt_init_workarounds(engine); 1280 1281 if (IS_KABYLAKE(dev_priv)) 1282 return kbl_init_workarounds(engine); 1283 1284 return 0; 1285 } 1286 1287 static int init_render_ring(struct intel_engine_cs *engine) 1288 { 1289 struct drm_i915_private *dev_priv = engine->i915; 1290 int ret = init_ring_common(engine); 1291 if (ret) 1292 return ret; 1293 1294 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 1295 if (IS_GEN(dev_priv, 4, 6)) 1296 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 1297 1298 /* We need to disable the AsyncFlip performance optimisations in order 1299 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 1300 * programmed to '1' on all products. 
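	 * The MI_MODE write below sets ASYNC_FLIP_PERF_DISABLE explicitly
	 * anyway.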
1301 * 1302 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 1303 */ 1304 if (IS_GEN(dev_priv, 6, 7)) 1305 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1306 1307 /* Required for the hardware to program scanline values for waiting */ 1308 /* WaEnableFlushTlbInvalidationMode:snb */ 1309 if (IS_GEN6(dev_priv)) 1310 I915_WRITE(GFX_MODE, 1311 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 1312 1313 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 1314 if (IS_GEN7(dev_priv)) 1315 I915_WRITE(GFX_MODE_GEN7, 1316 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 1317 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 1318 1319 if (IS_GEN6(dev_priv)) { 1320 /* From the Sandybridge PRM, volume 1 part 3, page 24: 1321 * "If this bit is set, STCunit will have LRA as replacement 1322 * policy. [...] This bit must be reset. LRA replacement 1323 * policy is not supported." 1324 */ 1325 I915_WRITE(CACHE_MODE_0, 1326 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 1327 } 1328 1329 if (IS_GEN(dev_priv, 6, 7)) 1330 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1331 1332 if (HAS_L3_DPF(dev_priv)) 1333 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); 1334 1335 return init_workarounds_ring(engine); 1336 } 1337 1338 static void render_ring_cleanup(struct intel_engine_cs *engine) 1339 { 1340 struct drm_i915_private *dev_priv = engine->i915; 1341 1342 if (dev_priv->semaphore_obj) { 1343 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); 1344 drm_gem_object_unreference(&dev_priv->semaphore_obj->base); 1345 dev_priv->semaphore_obj = NULL; 1346 } 1347 1348 intel_fini_pipe_control(engine); 1349 } 1350 1351 static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, 1352 unsigned int num_dwords) 1353 { 1354 #define MBOX_UPDATE_DWORDS 8 1355 struct intel_engine_cs *signaller = signaller_req->engine; 1356 struct drm_i915_private *dev_priv = signaller_req->i915; 1357 struct intel_engine_cs *waiter; 1358 enum intel_engine_id id; 1359 int ret, num_rings; 1360 1361 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); 1362 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1363 #undef MBOX_UPDATE_DWORDS 1364 1365 ret = intel_ring_begin(signaller_req, num_dwords); 1366 if (ret) 1367 return ret; 1368 1369 for_each_engine_id(waiter, dev_priv, id) { 1370 u32 seqno; 1371 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1372 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1373 continue; 1374 1375 seqno = i915_gem_request_get_seqno(signaller_req); 1376 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); 1377 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | 1378 PIPE_CONTROL_QW_WRITE | 1379 PIPE_CONTROL_CS_STALL); 1380 intel_ring_emit(signaller, lower_32_bits(gtt_offset)); 1381 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1382 intel_ring_emit(signaller, seqno); 1383 intel_ring_emit(signaller, 0); 1384 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1385 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1386 intel_ring_emit(signaller, 0); 1387 } 1388 1389 return 0; 1390 } 1391 1392 static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, 1393 unsigned int num_dwords) 1394 { 1395 #define MBOX_UPDATE_DWORDS 6 1396 struct intel_engine_cs *signaller = signaller_req->engine; 1397 struct drm_i915_private *dev_priv = signaller_req->i915; 1398 struct intel_engine_cs *waiter; 1399 enum intel_engine_id id; 1400 int ret, num_rings; 1401 1402 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); 1403 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1404 #undef MBOX_UPDATE_DWORDS 
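	/* One 6-dword MI_FLUSH_DW + MI_SEMAPHORE_SIGNAL block is emitted per
	 * other engine in the loop below; the dword count above accounts
	 * for that.
	 */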
1405 1406 ret = intel_ring_begin(signaller_req, num_dwords); 1407 if (ret) 1408 return ret; 1409 1410 for_each_engine_id(waiter, dev_priv, id) { 1411 u32 seqno; 1412 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1413 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1414 continue; 1415 1416 seqno = i915_gem_request_get_seqno(signaller_req); 1417 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | 1418 MI_FLUSH_DW_OP_STOREDW); 1419 intel_ring_emit(signaller, lower_32_bits(gtt_offset) | 1420 MI_FLUSH_DW_USE_GTT); 1421 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1422 intel_ring_emit(signaller, seqno); 1423 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1424 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1425 intel_ring_emit(signaller, 0); 1426 } 1427 1428 return 0; 1429 } 1430 1431 static int gen6_signal(struct drm_i915_gem_request *signaller_req, 1432 unsigned int num_dwords) 1433 { 1434 struct intel_engine_cs *signaller = signaller_req->engine; 1435 struct drm_i915_private *dev_priv = signaller_req->i915; 1436 struct intel_engine_cs *useless; 1437 enum intel_engine_id id; 1438 int ret, num_rings; 1439 1440 #define MBOX_UPDATE_DWORDS 3 1441 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); 1442 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); 1443 #undef MBOX_UPDATE_DWORDS 1444 1445 ret = intel_ring_begin(signaller_req, num_dwords); 1446 if (ret) 1447 return ret; 1448 1449 for_each_engine_id(useless, dev_priv, id) { 1450 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; 1451 1452 if (i915_mmio_reg_valid(mbox_reg)) { 1453 u32 seqno = i915_gem_request_get_seqno(signaller_req); 1454 1455 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 1456 intel_ring_emit_reg(signaller, mbox_reg); 1457 intel_ring_emit(signaller, seqno); 1458 } 1459 } 1460 1461 /* If num_dwords was rounded, make sure the tail pointer is correct */ 1462 if (num_rings % 2 == 0) 1463 intel_ring_emit(signaller, MI_NOOP); 1464 1465 return 0; 1466 } 1467 1468 /** 1469 * gen6_add_request - Update the semaphore mailbox registers 1470 * 1471 * @request - request to write to the ring 1472 * 1473 * Update the mailbox registers in the *other* rings with the current seqno. 1474 * This acts like a signal in the canonical semaphore. 
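 *
 * The mailboxes are written with MI_LOAD_REGISTER_IMM in gen6_signal(); the
 * request's seqno is then written to the HWS page and MI_USER_INTERRUPT is
 * emitted to notify any waiters.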
1475 */ 1476 static int 1477 gen6_add_request(struct drm_i915_gem_request *req) 1478 { 1479 struct intel_engine_cs *engine = req->engine; 1480 int ret; 1481 1482 if (engine->semaphore.signal) 1483 ret = engine->semaphore.signal(req, 4); 1484 else 1485 ret = intel_ring_begin(req, 4); 1486 1487 if (ret) 1488 return ret; 1489 1490 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1491 intel_ring_emit(engine, 1492 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1493 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1494 intel_ring_emit(engine, MI_USER_INTERRUPT); 1495 __intel_ring_advance(engine); 1496 1497 return 0; 1498 } 1499 1500 static int 1501 gen8_render_add_request(struct drm_i915_gem_request *req) 1502 { 1503 struct intel_engine_cs *engine = req->engine; 1504 int ret; 1505 1506 if (engine->semaphore.signal) 1507 ret = engine->semaphore.signal(req, 8); 1508 else 1509 ret = intel_ring_begin(req, 8); 1510 if (ret) 1511 return ret; 1512 1513 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); 1514 intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | 1515 PIPE_CONTROL_CS_STALL | 1516 PIPE_CONTROL_QW_WRITE)); 1517 intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); 1518 intel_ring_emit(engine, 0); 1519 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1520 /* We're thrashing one dword of HWS. */ 1521 intel_ring_emit(engine, 0); 1522 intel_ring_emit(engine, MI_USER_INTERRUPT); 1523 intel_ring_emit(engine, MI_NOOP); 1524 __intel_ring_advance(engine); 1525 1526 return 0; 1527 } 1528 1529 static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, 1530 u32 seqno) 1531 { 1532 return dev_priv->last_seqno < seqno; 1533 } 1534 1535 /** 1536 * intel_ring_sync - sync the waiter to the signaller on seqno 1537 * 1538 * @waiter - ring that is waiting 1539 * @signaller - ring which has, or will signal 1540 * @seqno - seqno which the waiter will block on 1541 */ 1542 1543 static int 1544 gen8_ring_sync(struct drm_i915_gem_request *waiter_req, 1545 struct intel_engine_cs *signaller, 1546 u32 seqno) 1547 { 1548 struct intel_engine_cs *waiter = waiter_req->engine; 1549 struct drm_i915_private *dev_priv = waiter_req->i915; 1550 u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id); 1551 struct i915_hw_ppgtt *ppgtt; 1552 int ret; 1553 1554 ret = intel_ring_begin(waiter_req, 4); 1555 if (ret) 1556 return ret; 1557 1558 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | 1559 MI_SEMAPHORE_GLOBAL_GTT | 1560 MI_SEMAPHORE_SAD_GTE_SDD); 1561 intel_ring_emit(waiter, seqno); 1562 intel_ring_emit(waiter, lower_32_bits(offset)); 1563 intel_ring_emit(waiter, upper_32_bits(offset)); 1564 intel_ring_advance(waiter); 1565 1566 /* When the !RCS engines idle waiting upon a semaphore, they lose their 1567 * pagetables and we must reload them before executing the batch. 1568 * We do this on the i915_switch_context() following the wait and 1569 * before the dispatch. 
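	 * Marking the engine dirty in pd_dirty_rings below is what triggers
	 * that reload.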
	 */
	ppgtt = waiter_req->ctx->ppgtt;
	if (ppgtt && waiter_req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
			PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to work around the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
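	 * That is what the six PIPE_CONTROL_FLUSH()es to successive scratch
	 * cachelines below are for, before the final PIPE_CONTROL_NOTIFY.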
1641 */ 1642 ret = intel_ring_begin(req, 32); 1643 if (ret) 1644 return ret; 1645 1646 intel_ring_emit(engine, 1647 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 1648 PIPE_CONTROL_WRITE_FLUSH | 1649 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 1650 intel_ring_emit(engine, 1651 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 1652 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1653 intel_ring_emit(engine, 0); 1654 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1655 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ 1656 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1657 scratch_addr += 2 * CACHELINE_BYTES; 1658 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1659 scratch_addr += 2 * CACHELINE_BYTES; 1660 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1661 scratch_addr += 2 * CACHELINE_BYTES; 1662 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1663 scratch_addr += 2 * CACHELINE_BYTES; 1664 PIPE_CONTROL_FLUSH(engine, scratch_addr); 1665 1666 intel_ring_emit(engine, 1667 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 1668 PIPE_CONTROL_WRITE_FLUSH | 1669 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 1670 PIPE_CONTROL_NOTIFY); 1671 intel_ring_emit(engine, 1672 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 1673 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1674 intel_ring_emit(engine, 0); 1675 __intel_ring_advance(engine); 1676 1677 return 0; 1678 } 1679 1680 static void 1681 gen6_seqno_barrier(struct intel_engine_cs *engine) 1682 { 1683 struct drm_i915_private *dev_priv = engine->i915; 1684 1685 /* Workaround to force correct ordering between irq and seqno writes on 1686 * ivb (and maybe also on snb) by reading from a CS register (like 1687 * ACTHD) before reading the status page. 1688 * 1689 * Note that this effectively stalls the read by the time it takes to 1690 * do a memory transaction, which more or less ensures that the write 1691 * from the GPU has sufficient time to invalidate the CPU cacheline. 1692 * Alternatively we could delay the interrupt from the CS ring to give 1693 * the write time to land, but that would incur a delay after every 1694 * batch i.e. much more frequent than a delay when waiting for the 1695 * interrupt (with the same net latency). 1696 * 1697 * Also note that to prevent whole machine hangs on gen7, we have to 1698 * take the spinlock to guard against concurrent cacheline access. 
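	 * (hence the uncore.lock held around the posting read of ACTHD below)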
1699 */ 1700 spin_lock_irq(&dev_priv->uncore.lock); 1701 POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); 1702 spin_unlock_irq(&dev_priv->uncore.lock); 1703 } 1704 1705 static u32 1706 ring_get_seqno(struct intel_engine_cs *engine) 1707 { 1708 return intel_read_status_page(engine, I915_GEM_HWS_INDEX); 1709 } 1710 1711 static void 1712 ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1713 { 1714 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 1715 } 1716 1717 static u32 1718 pc_render_get_seqno(struct intel_engine_cs *engine) 1719 { 1720 return engine->scratch.cpu_page[0]; 1721 } 1722 1723 static void 1724 pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1725 { 1726 engine->scratch.cpu_page[0] = seqno; 1727 } 1728 1729 static bool 1730 gen5_ring_get_irq(struct intel_engine_cs *engine) 1731 { 1732 struct drm_i915_private *dev_priv = engine->i915; 1733 unsigned long flags; 1734 1735 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1736 return false; 1737 1738 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1739 if (engine->irq_refcount++ == 0) 1740 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1741 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1742 1743 return true; 1744 } 1745 1746 static void 1747 gen5_ring_put_irq(struct intel_engine_cs *engine) 1748 { 1749 struct drm_i915_private *dev_priv = engine->i915; 1750 unsigned long flags; 1751 1752 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1753 if (--engine->irq_refcount == 0) 1754 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1755 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1756 } 1757 1758 static bool 1759 i9xx_ring_get_irq(struct intel_engine_cs *engine) 1760 { 1761 struct drm_i915_private *dev_priv = engine->i915; 1762 unsigned long flags; 1763 1764 if (!intel_irqs_enabled(dev_priv)) 1765 return false; 1766 1767 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1768 if (engine->irq_refcount++ == 0) { 1769 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1770 I915_WRITE(IMR, dev_priv->irq_mask); 1771 POSTING_READ(IMR); 1772 } 1773 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1774 1775 return true; 1776 } 1777 1778 static void 1779 i9xx_ring_put_irq(struct intel_engine_cs *engine) 1780 { 1781 struct drm_i915_private *dev_priv = engine->i915; 1782 unsigned long flags; 1783 1784 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1785 if (--engine->irq_refcount == 0) { 1786 dev_priv->irq_mask |= engine->irq_enable_mask; 1787 I915_WRITE(IMR, dev_priv->irq_mask); 1788 POSTING_READ(IMR); 1789 } 1790 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1791 } 1792 1793 static bool 1794 i8xx_ring_get_irq(struct intel_engine_cs *engine) 1795 { 1796 struct drm_i915_private *dev_priv = engine->i915; 1797 unsigned long flags; 1798 1799 if (!intel_irqs_enabled(dev_priv)) 1800 return false; 1801 1802 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1803 if (engine->irq_refcount++ == 0) { 1804 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1805 I915_WRITE16(IMR, dev_priv->irq_mask); 1806 POSTING_READ16(IMR); 1807 } 1808 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1809 1810 return true; 1811 } 1812 1813 static void 1814 i8xx_ring_put_irq(struct intel_engine_cs *engine) 1815 { 1816 struct drm_i915_private *dev_priv = engine->i915; 1817 unsigned long flags; 1818 1819 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1820 if (--engine->irq_refcount == 0) { 1821 dev_priv->irq_mask |= engine->irq_enable_mask; 1822 I915_WRITE16(IMR, dev_priv->irq_mask); 1823 POSTING_READ16(IMR); 1824 } 1825 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1826 } 1827 1828 static int 1829 bsd_ring_flush(struct drm_i915_gem_request *req, 1830 u32 invalidate_domains, 1831 u32 flush_domains) 1832 { 1833 struct intel_engine_cs *engine = req->engine; 1834 int ret; 1835 1836 ret = intel_ring_begin(req, 2); 1837 if (ret) 1838 return ret; 1839 1840 intel_ring_emit(engine, MI_FLUSH); 1841 intel_ring_emit(engine, MI_NOOP); 1842 intel_ring_advance(engine); 1843 return 0; 1844 } 1845 1846 static int 1847 i9xx_add_request(struct drm_i915_gem_request *req) 1848 { 1849 struct intel_engine_cs *engine = req->engine; 1850 int ret; 1851 1852 ret = intel_ring_begin(req, 4); 1853 if (ret) 1854 return ret; 1855 1856 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1857 intel_ring_emit(engine, 1858 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1859 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1860 intel_ring_emit(engine, MI_USER_INTERRUPT); 1861 __intel_ring_advance(engine); 1862 1863 return 0; 1864 } 1865 1866 static bool 1867 gen6_ring_get_irq(struct intel_engine_cs *engine) 1868 { 1869 struct drm_i915_private *dev_priv = engine->i915; 1870 unsigned long flags; 1871 1872 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1873 return false; 1874 1875 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1876 if (engine->irq_refcount++ == 0) { 1877 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) 1878 I915_WRITE_IMR(engine, 1879 ~(engine->irq_enable_mask | 1880 GT_PARITY_ERROR(dev_priv))); 1881 else 1882 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1883 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1884 } 1885 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1886 1887 return true; 1888 } 1889 1890 static void 1891 gen6_ring_put_irq(struct intel_engine_cs *engine) 1892 { 1893 struct drm_i915_private *dev_priv = engine->i915; 1894 unsigned long flags; 1895 1896 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1897 if (--engine->irq_refcount == 0) { 1898 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) 1899 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); 1900 else 1901 I915_WRITE_IMR(engine, ~0); 1902 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1903 } 1904 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1905 } 1906 1907 static bool 1908 hsw_vebox_get_irq(struct intel_engine_cs *engine) 1909 { 1910 struct drm_i915_private *dev_priv = engine->i915; 1911 unsigned long flags; 1912 1913 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1914 return false; 1915 1916 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1917 if (engine->irq_refcount++ == 0) { 1918 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1919 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); 1920 } 1921 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1922 1923 return true; 1924 } 1925 1926 static void 1927 hsw_vebox_put_irq(struct intel_engine_cs *engine) 1928 { 1929 struct drm_i915_private *dev_priv = engine->i915; 1930 unsigned long flags; 1931 1932 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1933 if (--engine->irq_refcount == 0) { 1934 I915_WRITE_IMR(engine, ~0); 1935 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); 1936 } 1937 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1938 } 1939 1940 static bool 1941 gen8_ring_get_irq(struct intel_engine_cs *engine) 1942 { 1943 struct drm_i915_private *dev_priv = engine->i915; 1944 unsigned long flags; 1945 1946 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1947 return false; 1948 1949 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1950 if (engine->irq_refcount++ 
== 0) { 1951 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { 1952 I915_WRITE_IMR(engine, 1953 ~(engine->irq_enable_mask | 1954 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1955 } else { 1956 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1957 } 1958 POSTING_READ(RING_IMR(engine->mmio_base)); 1959 } 1960 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1961 1962 return true; 1963 } 1964 1965 static void 1966 gen8_ring_put_irq(struct intel_engine_cs *engine) 1967 { 1968 struct drm_i915_private *dev_priv = engine->i915; 1969 unsigned long flags; 1970 1971 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1972 if (--engine->irq_refcount == 0) { 1973 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { 1974 I915_WRITE_IMR(engine, 1975 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1976 } else { 1977 I915_WRITE_IMR(engine, ~0); 1978 } 1979 POSTING_READ(RING_IMR(engine->mmio_base)); 1980 } 1981 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1982 } 1983 1984 static int 1985 i965_dispatch_execbuffer(struct drm_i915_gem_request *req, 1986 u64 offset, u32 length, 1987 unsigned dispatch_flags) 1988 { 1989 struct intel_engine_cs *engine = req->engine; 1990 int ret; 1991 1992 ret = intel_ring_begin(req, 2); 1993 if (ret) 1994 return ret; 1995 1996 intel_ring_emit(engine, 1997 MI_BATCH_BUFFER_START | 1998 MI_BATCH_GTT | 1999 (dispatch_flags & I915_DISPATCH_SECURE ? 2000 0 : MI_BATCH_NON_SECURE_I965)); 2001 intel_ring_emit(engine, offset); 2002 intel_ring_advance(engine); 2003 2004 return 0; 2005 } 2006 2007 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 2008 #define I830_BATCH_LIMIT (256*1024) 2009 #define I830_TLB_ENTRIES (2) 2010 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 2011 static int 2012 i830_dispatch_execbuffer(struct drm_i915_gem_request *req, 2013 u64 offset, u32 len, 2014 unsigned dispatch_flags) 2015 { 2016 struct intel_engine_cs *engine = req->engine; 2017 u32 cs_offset = engine->scratch.gtt_offset; 2018 int ret; 2019 2020 ret = intel_ring_begin(req, 6); 2021 if (ret) 2022 return ret; 2023 2024 /* Evict the invalid PTE TLBs */ 2025 intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA); 2026 intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); 2027 intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */ 2028 intel_ring_emit(engine, cs_offset); 2029 intel_ring_emit(engine, 0xdeadbeef); 2030 intel_ring_emit(engine, MI_NOOP); 2031 intel_ring_advance(engine); 2032 2033 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 2034 if (len > I830_BATCH_LIMIT) 2035 return -ENOSPC; 2036 2037 ret = intel_ring_begin(req, 6 + 2); 2038 if (ret) 2039 return ret; 2040 2041 /* Blit the batch (which now has all relocs applied) to the 2042 * stable batch scratch bo area (so that the CS never 2043 * stumbles over its tlb invalidation bug) ... 2044 */ 2045 intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); 2046 intel_ring_emit(engine, 2047 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); 2048 intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096); 2049 intel_ring_emit(engine, cs_offset); 2050 intel_ring_emit(engine, 4096); 2051 intel_ring_emit(engine, offset); 2052 2053 intel_ring_emit(engine, MI_FLUSH); 2054 intel_ring_emit(engine, MI_NOOP); 2055 intel_ring_advance(engine); 2056 2057 /* ... and execute it.
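* (i.e. jump to the copy in the stable scratch bo at cs_offset rather than the original batch, so the CS never reads through its broken TLB entries)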
*/ 2058 offset = cs_offset; 2059 } 2060 2061 ret = intel_ring_begin(req, 2); 2062 if (ret) 2063 return ret; 2064 2065 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 2066 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 2067 0 : MI_BATCH_NON_SECURE)); 2068 intel_ring_advance(engine); 2069 2070 return 0; 2071 } 2072 2073 static int 2074 i915_dispatch_execbuffer(struct drm_i915_gem_request *req, 2075 u64 offset, u32 len, 2076 unsigned dispatch_flags) 2077 { 2078 struct intel_engine_cs *engine = req->engine; 2079 int ret; 2080 2081 ret = intel_ring_begin(req, 2); 2082 if (ret) 2083 return ret; 2084 2085 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 2086 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 2087 0 : MI_BATCH_NON_SECURE)); 2088 intel_ring_advance(engine); 2089 2090 return 0; 2091 } 2092 2093 static void cleanup_phys_status_page(struct intel_engine_cs *engine) 2094 { 2095 struct drm_i915_private *dev_priv = engine->i915; 2096 2097 if (!dev_priv->status_page_dmah) 2098 return; 2099 2100 drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); 2101 engine->status_page.page_addr = NULL; 2102 } 2103 2104 static void cleanup_status_page(struct intel_engine_cs *engine) 2105 { 2106 struct drm_i915_gem_object *obj; 2107 2108 obj = engine->status_page.obj; 2109 if (obj == NULL) 2110 return; 2111 2112 kunmap(sg_page(obj->pages->sgl)); 2113 i915_gem_object_ggtt_unpin(obj); 2114 drm_gem_object_unreference(&obj->base); 2115 engine->status_page.obj = NULL; 2116 } 2117 2118 static int init_status_page(struct intel_engine_cs *engine) 2119 { 2120 struct drm_i915_gem_object *obj = engine->status_page.obj; 2121 2122 if (obj == NULL) { 2123 unsigned flags; 2124 int ret; 2125 2126 obj = i915_gem_object_create(engine->i915->dev, 4096); 2127 if (IS_ERR(obj)) { 2128 DRM_ERROR("Failed to allocate status page\n"); 2129 return PTR_ERR(obj); 2130 } 2131 2132 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2133 if (ret) 2134 goto err_unref; 2135 2136 flags = 0; 2137 if (!HAS_LLC(engine->i915)) 2138 /* On g33, we cannot place HWS above 256MiB, so 2139 * restrict its pinning to the low mappable arena. 2140 * Though this restriction is not documented for 2141 * gen4, gen5, or byt, they also behave similarly 2142 * and hang if the HWS is placed at the top of the 2143 * GTT. To generalise, it appears that all !llc 2144 * platforms have issues with us placing the HWS 2145 * above the mappable region (even though we never 2146 * actually map it).
2147 */ 2148 flags |= PIN_MAPPABLE; 2149 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); 2150 if (ret) { 2151 err_unref: 2152 drm_gem_object_unreference(&obj->base); 2153 return ret; 2154 } 2155 2156 engine->status_page.obj = obj; 2157 } 2158 2159 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 2160 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 2161 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 2162 2163 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 2164 engine->name, engine->status_page.gfx_addr); 2165 2166 return 0; 2167 } 2168 2169 static int init_phys_status_page(struct intel_engine_cs *engine) 2170 { 2171 struct drm_i915_private *dev_priv = engine->i915; 2172 2173 if (!dev_priv->status_page_dmah) { 2174 dev_priv->status_page_dmah = 2175 drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); 2176 if (!dev_priv->status_page_dmah) 2177 return -ENOMEM; 2178 } 2179 2180 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 2181 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 2182 2183 return 0; 2184 } 2185 2186 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2187 { 2188 GEM_BUG_ON(ringbuf->vma == NULL); 2189 GEM_BUG_ON(ringbuf->virtual_start == NULL); 2190 2191 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) 2192 i915_gem_object_unpin_map(ringbuf->obj); 2193 else 2194 i915_vma_unpin_iomap(ringbuf->vma); 2195 ringbuf->virtual_start = NULL; 2196 2197 i915_gem_object_ggtt_unpin(ringbuf->obj); 2198 ringbuf->vma = NULL; 2199 } 2200 2201 int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, 2202 struct intel_ringbuffer *ringbuf) 2203 { 2204 struct drm_i915_gem_object *obj = ringbuf->obj; 2205 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2206 unsigned flags = PIN_OFFSET_BIAS | 4096; 2207 void *addr; 2208 int ret; 2209 2210 if (HAS_LLC(dev_priv) && !obj->stolen) { 2211 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); 2212 if (ret) 2213 return ret; 2214 2215 ret = i915_gem_object_set_to_cpu_domain(obj, true); 2216 if (ret) 2217 goto err_unpin; 2218 2219 addr = i915_gem_object_pin_map(obj); 2220 if (IS_ERR(addr)) { 2221 ret = PTR_ERR(addr); 2222 goto err_unpin; 2223 } 2224 } else { 2225 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 2226 flags | PIN_MAPPABLE); 2227 if (ret) 2228 return ret; 2229 2230 ret = i915_gem_object_set_to_gtt_domain(obj, true); 2231 if (ret) 2232 goto err_unpin; 2233 2234 /* Access through the GTT requires the device to be awake. 
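* (i915_vma_pin_iomap() below maps the ring through the GGTT aperture, which is only safe to touch while we hold a runtime-pm wakeref, hence the assert that follows)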
*/ 2235 assert_rpm_wakelock_held(dev_priv); 2236 2237 addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); 2238 if (IS_ERR(addr)) { 2239 ret = PTR_ERR(addr); 2240 goto err_unpin; 2241 } 2242 } 2243 2244 ringbuf->virtual_start = addr; 2245 ringbuf->vma = i915_gem_obj_to_ggtt(obj); 2246 return 0; 2247 2248 err_unpin: 2249 i915_gem_object_ggtt_unpin(obj); 2250 return ret; 2251 } 2252 2253 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2254 { 2255 drm_gem_object_unreference(&ringbuf->obj->base); 2256 ringbuf->obj = NULL; 2257 } 2258 2259 static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 2260 struct intel_ringbuffer *ringbuf) 2261 { 2262 struct drm_i915_gem_object *obj; 2263 2264 obj = NULL; 2265 if (!HAS_LLC(dev)) 2266 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2267 if (obj == NULL) 2268 obj = i915_gem_object_create(dev, ringbuf->size); 2269 if (IS_ERR(obj)) 2270 return PTR_ERR(obj); 2271 2272 /* mark ring buffers as read-only from GPU side by default */ 2273 obj->gt_ro = 1; 2274 2275 ringbuf->obj = obj; 2276 2277 return 0; 2278 } 2279 2280 struct intel_ringbuffer * 2281 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) 2282 { 2283 struct intel_ringbuffer *ring; 2284 int ret; 2285 2286 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2287 if (ring == NULL) { 2288 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", 2289 engine->name); 2290 return ERR_PTR(-ENOMEM); 2291 } 2292 2293 ring->engine = engine; 2294 list_add(&ring->link, &engine->buffers); 2295 2296 ring->size = size; 2297 /* Workaround an erratum on the i830 which causes a hang if 2298 * the TAIL pointer points to within the last 2 cachelines 2299 * of the buffer. 2300 */ 2301 ring->effective_size = size; 2302 if (IS_I830(engine->i915) || IS_845G(engine->i915)) 2303 ring->effective_size -= 2 * CACHELINE_BYTES; 2304 2305 ring->last_retired_head = -1; 2306 intel_ring_update_space(ring); 2307 2308 ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); 2309 if (ret) { 2310 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2311 engine->name, ret); 2312 list_del(&ring->link); 2313 kfree(ring); 2314 return ERR_PTR(ret); 2315 } 2316 2317 return ring; 2318 } 2319 2320 void 2321 intel_ringbuffer_free(struct intel_ringbuffer *ring) 2322 { 2323 intel_destroy_ringbuffer_obj(ring); 2324 list_del(&ring->link); 2325 kfree(ring); 2326 } 2327 2328 static int intel_ring_context_pin(struct i915_gem_context *ctx, 2329 struct intel_engine_cs *engine) 2330 { 2331 struct intel_context *ce = &ctx->engine[engine->id]; 2332 int ret; 2333 2334 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 2335 2336 if (ce->pin_count++) 2337 return 0; 2338 2339 if (ce->state) { 2340 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); 2341 if (ret) 2342 goto error; 2343 } 2344 2345 /* The kernel context is only used as a placeholder for flushing the 2346 * active context. It is never used for submitting user rendering and 2347 * as such never requires the golden render context, and so we can skip 2348 * emitting it when we switch to the kernel context. This is required 2349 * as during eviction we cannot allocate and pin the renderstate in 2350 * order to initialise the context. 
2351 */ 2352 if (ctx == ctx->i915->kernel_context) 2353 ce->initialised = true; 2354 2355 i915_gem_context_reference(ctx); 2356 return 0; 2357 2358 error: 2359 ce->pin_count = 0; 2360 return ret; 2361 } 2362 2363 static void intel_ring_context_unpin(struct i915_gem_context *ctx, 2364 struct intel_engine_cs *engine) 2365 { 2366 struct intel_context *ce = &ctx->engine[engine->id]; 2367 2368 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 2369 2370 if (--ce->pin_count) 2371 return; 2372 2373 if (ce->state) 2374 i915_gem_object_ggtt_unpin(ce->state); 2375 2376 i915_gem_context_unreference(ctx); 2377 } 2378 2379 static int intel_init_ring_buffer(struct drm_device *dev, 2380 struct intel_engine_cs *engine) 2381 { 2382 struct drm_i915_private *dev_priv = to_i915(dev); 2383 struct intel_ringbuffer *ringbuf; 2384 int ret; 2385 2386 WARN_ON(engine->buffer); 2387 2388 engine->i915 = dev_priv; 2389 INIT_LIST_HEAD(&engine->active_list); 2390 INIT_LIST_HEAD(&engine->request_list); 2391 INIT_LIST_HEAD(&engine->execlist_queue); 2392 INIT_LIST_HEAD(&engine->buffers); 2393 i915_gem_batch_pool_init(dev, &engine->batch_pool); 2394 memset(engine->semaphore.sync_seqno, 0, 2395 sizeof(engine->semaphore.sync_seqno)); 2396 2397 init_waitqueue_head(&engine->irq_queue); 2398 2399 /* We may need to do things with the shrinker which 2400 * require us to immediately switch back to the default 2401 * context. This can cause a problem as pinning the 2402 * default context also requires GTT space which may not 2403 * be available. To avoid this we always pin the default 2404 * context. 2405 */ 2406 ret = intel_ring_context_pin(dev_priv->kernel_context, engine); 2407 if (ret) 2408 goto error; 2409 2410 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2411 if (IS_ERR(ringbuf)) { 2412 ret = PTR_ERR(ringbuf); 2413 goto error; 2414 } 2415 engine->buffer = ringbuf; 2416 2417 if (I915_NEED_GFX_HWS(dev_priv)) { 2418 ret = init_status_page(engine); 2419 if (ret) 2420 goto error; 2421 } else { 2422 WARN_ON(engine->id != RCS); 2423 ret = init_phys_status_page(engine); 2424 if (ret) 2425 goto error; 2426 } 2427 2428 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); 2429 if (ret) { 2430 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2431 engine->name, ret); 2432 intel_destroy_ringbuffer_obj(ringbuf); 2433 goto error; 2434 } 2435 2436 ret = i915_cmd_parser_init_ring(engine); 2437 if (ret) 2438 goto error; 2439 2440 return 0; 2441 2442 error: 2443 intel_cleanup_engine(engine); 2444 return ret; 2445 } 2446 2447 void intel_cleanup_engine(struct intel_engine_cs *engine) 2448 { 2449 struct drm_i915_private *dev_priv; 2450 2451 if (!intel_engine_initialized(engine)) 2452 return; 2453 2454 dev_priv = engine->i915; 2455 2456 if (engine->buffer) { 2457 intel_stop_engine(engine); 2458 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2459 2460 intel_unpin_ringbuffer_obj(engine->buffer); 2461 intel_ringbuffer_free(engine->buffer); 2462 engine->buffer = NULL; 2463 } 2464 2465 if (engine->cleanup) 2466 engine->cleanup(engine); 2467 2468 if (I915_NEED_GFX_HWS(dev_priv)) { 2469 cleanup_status_page(engine); 2470 } else { 2471 WARN_ON(engine->id != RCS); 2472 cleanup_phys_status_page(engine); 2473 } 2474 2475 i915_cmd_parser_fini_ring(engine); 2476 i915_gem_batch_pool_fini(&engine->batch_pool); 2477 2478 intel_ring_context_unpin(dev_priv->kernel_context, engine); 2479 2480 engine->i915 = NULL; 2481 } 2482 2483 int intel_engine_idle(struct intel_engine_cs *engine) 2484 { 2485 struct 
drm_i915_gem_request *req; 2486 2487 /* Wait upon the last request to be completed */ 2488 if (list_empty(&engine->request_list)) 2489 return 0; 2490 2491 req = list_entry(engine->request_list.prev, 2492 struct drm_i915_gem_request, 2493 list); 2494 2495 /* Make sure we do not trigger any retires */ 2496 return __i915_wait_request(req, 2497 req->i915->mm.interruptible, 2498 NULL, NULL); 2499 } 2500 2501 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2502 { 2503 int ret; 2504 2505 /* Flush enough space to reduce the likelihood of waiting after 2506 * we start building the request - in which case we will just 2507 * have to repeat work. 2508 */ 2509 request->reserved_space += LEGACY_REQUEST_SIZE; 2510 2511 request->ringbuf = request->engine->buffer; 2512 2513 ret = intel_ring_begin(request, 0); 2514 if (ret) 2515 return ret; 2516 2517 request->reserved_space -= LEGACY_REQUEST_SIZE; 2518 return 0; 2519 } 2520 2521 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2522 { 2523 struct intel_ringbuffer *ringbuf = req->ringbuf; 2524 struct intel_engine_cs *engine = req->engine; 2525 struct drm_i915_gem_request *target; 2526 2527 intel_ring_update_space(ringbuf); 2528 if (ringbuf->space >= bytes) 2529 return 0; 2530 2531 /* 2532 * Space is reserved in the ringbuffer for finalising the request, 2533 * as that cannot be allowed to fail. During request finalisation, 2534 * reserved_space is set to 0 to stop the overallocation and the 2535 * assumption is that then we never need to wait (which has the 2536 * risk of failing with EINTR). 2537 * 2538 * See also i915_gem_request_alloc() and i915_add_request(). 2539 */ 2540 GEM_BUG_ON(!req->reserved_space); 2541 2542 list_for_each_entry(target, &engine->request_list, list) { 2543 unsigned space; 2544 2545 /* 2546 * The request queue is per-engine, so can contain requests 2547 * from multiple ringbuffers. Here, we must ignore any that 2548 * aren't from the ringbuffer we're considering. 2549 */ 2550 if (target->ringbuf != ringbuf) 2551 continue; 2552 2553 /* Would completion of this request free enough space? */ 2554 space = __intel_ring_space(target->postfix, ringbuf->tail, 2555 ringbuf->size); 2556 if (space >= bytes) 2557 break; 2558 } 2559 2560 if (WARN_ON(&target->list == &engine->request_list)) 2561 return -ENOSPC; 2562 2563 return i915_wait_request(target); 2564 } 2565 2566 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2567 { 2568 struct intel_ringbuffer *ringbuf = req->ringbuf; 2569 int remain_actual = ringbuf->size - ringbuf->tail; 2570 int remain_usable = ringbuf->effective_size - ringbuf->tail; 2571 int bytes = num_dwords * sizeof(u32); 2572 int total_bytes, wait_bytes; 2573 bool need_wrap = false; 2574 2575 total_bytes = bytes + req->reserved_space; 2576 2577 if (unlikely(bytes > remain_usable)) { 2578 /* 2579 * Not enough space for the basic request. So need to flush 2580 * out the remainder and then wait for base + reserved. 2581 */ 2582 wait_bytes = remain_actual + total_bytes; 2583 need_wrap = true; 2584 } else if (unlikely(total_bytes > remain_usable)) { 2585 /* 2586 * The base request will fit but the reserved space 2587 * falls off the end. So we don't need an immediate wrap 2588 * and only need to effectively wait for the reserved 2589 * size space from the start of ringbuffer. 2590 */ 2591 wait_bytes = remain_actual + req->reserved_space; 2592 } else { 2593 /* No wrapping required, just waiting. 
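* (i.e. wait until the full request size plus its reserved completion space is available)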
*/ 2594 wait_bytes = total_bytes; 2595 } 2596 2597 if (wait_bytes > ringbuf->space) { 2598 int ret = wait_for_space(req, wait_bytes); 2599 if (unlikely(ret)) 2600 return ret; 2601 2602 intel_ring_update_space(ringbuf); 2603 if (unlikely(ringbuf->space < wait_bytes)) 2604 return -EAGAIN; 2605 } 2606 2607 if (unlikely(need_wrap)) { 2608 GEM_BUG_ON(remain_actual > ringbuf->space); 2609 GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); 2610 2611 /* Fill the tail with MI_NOOP */ 2612 memset(ringbuf->virtual_start + ringbuf->tail, 2613 0, remain_actual); 2614 ringbuf->tail = 0; 2615 ringbuf->space -= remain_actual; 2616 } 2617 2618 ringbuf->space -= bytes; 2619 GEM_BUG_ON(ringbuf->space < 0); 2620 return 0; 2621 } 2622 2623 /* Align the ring tail to a cacheline boundary */ 2624 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2625 { 2626 struct intel_engine_cs *engine = req->engine; 2627 int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2628 int ret; 2629 2630 if (num_dwords == 0) 2631 return 0; 2632 2633 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2634 ret = intel_ring_begin(req, num_dwords); 2635 if (ret) 2636 return ret; 2637 2638 while (num_dwords--) 2639 intel_ring_emit(engine, MI_NOOP); 2640 2641 intel_ring_advance(engine); 2642 2643 return 0; 2644 } 2645 2646 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2647 { 2648 struct drm_i915_private *dev_priv = engine->i915; 2649 2650 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2651 * so long as the semaphore value in the register/page is greater 2652 * than the sync value), so whenever we reset the seqno, 2653 * so long as we reset the tracking semaphore value to 0, it will 2654 * always be before the next request's seqno. If we don't reset 2655 * the semaphore value, then when the seqno moves backwards all 2656 * future waits will complete instantly (causing rendering corruption). 2657 */ 2658 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 2659 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2660 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2661 if (HAS_VEBOX(dev_priv)) 2662 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); 2663 } 2664 if (dev_priv->semaphore_obj) { 2665 struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; 2666 struct page *page = i915_gem_object_get_dirty_page(obj, 0); 2667 void *semaphores = kmap(page); 2668 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 2669 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); 2670 kunmap(page); 2671 } 2672 memset(engine->semaphore.sync_seqno, 0, 2673 sizeof(engine->semaphore.sync_seqno)); 2674 2675 engine->set_seqno(engine, seqno); 2676 engine->last_submitted_seqno = seqno; 2677 2678 engine->hangcheck.seqno = seqno; 2679 } 2680 2681 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2682 u32 value) 2683 { 2684 struct drm_i915_private *dev_priv = engine->i915; 2685 2686 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2687 2688 /* Every tail move must follow the sequence below */ 2689 2690 /* Disable notification that the ring is IDLE. The GT 2691 * will then assume that it is busy and bring it out of rc6. 2692 */ 2693 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2694 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2695 2696 /* Clear the context id. Here be magic! */ 2697 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 2698 2699 /* Wait for the ring not to be idle, i.e. for it to wake up. 
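* (poll for the sleep indicator bit to clear, giving it up to 50ms before complaining)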
*/ 2700 if (intel_wait_for_register_fw(dev_priv, 2701 GEN6_BSD_SLEEP_PSMI_CONTROL, 2702 GEN6_BSD_SLEEP_INDICATOR, 2703 0, 2704 50)) 2705 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2706 2707 /* Now that the ring is fully powered up, update the tail */ 2708 I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); 2709 POSTING_READ_FW(RING_TAIL(engine->mmio_base)); 2710 2711 /* Let the ring send IDLE messages to the GT again, 2712 * and so let it sleep to conserve power when idle. 2713 */ 2714 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2715 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2716 2717 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2718 } 2719 2720 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2721 u32 invalidate, u32 flush) 2722 { 2723 struct intel_engine_cs *engine = req->engine; 2724 uint32_t cmd; 2725 int ret; 2726 2727 ret = intel_ring_begin(req, 4); 2728 if (ret) 2729 return ret; 2730 2731 cmd = MI_FLUSH_DW; 2732 if (INTEL_GEN(req->i915) >= 8) 2733 cmd += 1; 2734 2735 /* We always require a command barrier so that subsequent 2736 * commands, such as breadcrumb interrupts, are strictly ordered 2737 * wrt the contents of the write cache being flushed to memory 2738 * (and thus being coherent from the CPU). 2739 */ 2740 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2741 2742 /* 2743 * Bspec vol 1c.5 - video engine command streamer: 2744 * "If ENABLED, all TLBs will be invalidated once the flush 2745 * operation is complete. This bit is only valid when the 2746 * Post-Sync Operation field is a value of 1h or 3h." 2747 */ 2748 if (invalidate & I915_GEM_GPU_DOMAINS) 2749 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2750 2751 intel_ring_emit(engine, cmd); 2752 intel_ring_emit(engine, 2753 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2754 if (INTEL_GEN(req->i915) >= 8) { 2755 intel_ring_emit(engine, 0); /* upper addr */ 2756 intel_ring_emit(engine, 0); /* value */ 2757 } else { 2758 intel_ring_emit(engine, 0); 2759 intel_ring_emit(engine, MI_NOOP); 2760 } 2761 intel_ring_advance(engine); 2762 return 0; 2763 } 2764 2765 static int 2766 gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2767 u64 offset, u32 len, 2768 unsigned dispatch_flags) 2769 { 2770 struct intel_engine_cs *engine = req->engine; 2771 bool ppgtt = USES_PPGTT(engine->dev) && 2772 !(dispatch_flags & I915_DISPATCH_SECURE); 2773 int ret; 2774 2775 ret = intel_ring_begin(req, 4); 2776 if (ret) 2777 return ret; 2778 2779 /* FIXME(BDW): Address space and security selectors. */ 2780 intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2781 (dispatch_flags & I915_DISPATCH_RS ? 2782 MI_BATCH_RESOURCE_STREAMER : 0)); 2783 intel_ring_emit(engine, lower_32_bits(offset)); 2784 intel_ring_emit(engine, upper_32_bits(offset)); 2785 intel_ring_emit(engine, MI_NOOP); 2786 intel_ring_advance(engine); 2787 2788 return 0; 2789 } 2790 2791 static int 2792 hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2793 u64 offset, u32 len, 2794 unsigned dispatch_flags) 2795 { 2796 struct intel_engine_cs *engine = req->engine; 2797 int ret; 2798 2799 ret = intel_ring_begin(req, 2); 2800 if (ret) 2801 return ret; 2802 2803 intel_ring_emit(engine, 2804 MI_BATCH_BUFFER_START | 2805 (dispatch_flags & I915_DISPATCH_SECURE ? 2806 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2807 (dispatch_flags & I915_DISPATCH_RS ? 
2808 MI_BATCH_RESOURCE_STREAMER : 0)); 2809 /* bit0-7 is the length on GEN6+ */ 2810 intel_ring_emit(engine, offset); 2811 intel_ring_advance(engine); 2812 2813 return 0; 2814 } 2815 2816 static int 2817 gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2818 u64 offset, u32 len, 2819 unsigned dispatch_flags) 2820 { 2821 struct intel_engine_cs *engine = req->engine; 2822 int ret; 2823 2824 ret = intel_ring_begin(req, 2); 2825 if (ret) 2826 return ret; 2827 2828 intel_ring_emit(engine, 2829 MI_BATCH_BUFFER_START | 2830 (dispatch_flags & I915_DISPATCH_SECURE ? 2831 0 : MI_BATCH_NON_SECURE_I965)); 2832 /* bit0-7 is the length on GEN6+ */ 2833 intel_ring_emit(engine, offset); 2834 intel_ring_advance(engine); 2835 2836 return 0; 2837 } 2838 2839 /* Blitter support (SandyBridge+) */ 2840 2841 static int gen6_ring_flush(struct drm_i915_gem_request *req, 2842 u32 invalidate, u32 flush) 2843 { 2844 struct intel_engine_cs *engine = req->engine; 2845 uint32_t cmd; 2846 int ret; 2847 2848 ret = intel_ring_begin(req, 4); 2849 if (ret) 2850 return ret; 2851 2852 cmd = MI_FLUSH_DW; 2853 if (INTEL_GEN(req->i915) >= 8) 2854 cmd += 1; 2855 2856 /* We always require a command barrier so that subsequent 2857 * commands, such as breadcrumb interrupts, are strictly ordered 2858 * wrt the contents of the write cache being flushed to memory 2859 * (and thus being coherent from the CPU). 2860 */ 2861 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2862 2863 /* 2864 * Bspec vol 1c.3 - blitter engine command streamer: 2865 * "If ENABLED, all TLBs will be invalidated once the flush 2866 * operation is complete. This bit is only valid when the 2867 * Post-Sync Operation field is a value of 1h or 3h." 2868 */ 2869 if (invalidate & I915_GEM_DOMAIN_RENDER) 2870 cmd |= MI_INVALIDATE_TLB; 2871 intel_ring_emit(engine, cmd); 2872 intel_ring_emit(engine, 2873 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2874 if (INTEL_GEN(req->i915) >= 8) { 2875 intel_ring_emit(engine, 0); /* upper addr */ 2876 intel_ring_emit(engine, 0); /* value */ 2877 } else { 2878 intel_ring_emit(engine, 0); 2879 intel_ring_emit(engine, MI_NOOP); 2880 } 2881 intel_ring_advance(engine); 2882 2883 return 0; 2884 } 2885 2886 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, 2887 struct intel_engine_cs *engine) 2888 { 2889 struct drm_i915_gem_object *obj; 2890 int ret, i; 2891 2892 if (!i915_semaphore_is_enabled(dev_priv)) 2893 return; 2894 2895 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { 2896 obj = i915_gem_object_create(dev_priv->dev, 4096); 2897 if (IS_ERR(obj)) { 2898 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2899 i915.semaphores = 0; 2900 } else { 2901 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2902 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); 2903 if (ret != 0) { 2904 drm_gem_object_unreference(&obj->base); 2905 DRM_ERROR("Failed to pin semaphore bo. 
Disabling semaphores\n"); 2906 i915.semaphores = 0; 2907 } else { 2908 dev_priv->semaphore_obj = obj; 2909 } 2910 } 2911 } 2912 2913 if (!i915_semaphore_is_enabled(dev_priv)) 2914 return; 2915 2916 if (INTEL_GEN(dev_priv) >= 8) { 2917 u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); 2918 2919 engine->semaphore.sync_to = gen8_ring_sync; 2920 engine->semaphore.signal = gen8_xcs_signal; 2921 2922 for (i = 0; i < I915_NUM_ENGINES; i++) { 2923 u64 ring_offset; 2924 2925 if (i != engine->id) 2926 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); 2927 else 2928 ring_offset = MI_SEMAPHORE_SYNC_INVALID; 2929 2930 engine->semaphore.signal_ggtt[i] = ring_offset; 2931 } 2932 } else if (INTEL_GEN(dev_priv) >= 6) { 2933 engine->semaphore.sync_to = gen6_ring_sync; 2934 engine->semaphore.signal = gen6_signal; 2935 2936 /* 2937 * The current semaphore is only applied on pre-gen8 2938 * platform. And there is no VCS2 ring on the pre-gen8 2939 * platform. So the semaphore between RCS and VCS2 is 2940 * initialized as INVALID. Gen8 will initialize the 2941 * sema between VCS2 and RCS later. 2942 */ 2943 for (i = 0; i < I915_NUM_ENGINES; i++) { 2944 static const struct { 2945 u32 wait_mbox; 2946 i915_reg_t mbox_reg; 2947 } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { 2948 [RCS] = { 2949 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, 2950 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, 2951 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, 2952 }, 2953 [VCS] = { 2954 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, 2955 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, 2956 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, 2957 }, 2958 [BCS] = { 2959 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, 2960 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, 2961 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, 2962 }, 2963 [VECS] = { 2964 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, 2965 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, 2966 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, 2967 }, 2968 }; 2969 u32 wait_mbox; 2970 i915_reg_t mbox_reg; 2971 2972 if (i == engine->id || i == VCS2) { 2973 wait_mbox = MI_SEMAPHORE_SYNC_INVALID; 2974 mbox_reg = GEN6_NOSYNC; 2975 } else { 2976 wait_mbox = sem_data[engine->id][i].wait_mbox; 2977 mbox_reg = sem_data[engine->id][i].mbox_reg; 2978 } 2979 2980 engine->semaphore.mbox.wait[i] = wait_mbox; 2981 engine->semaphore.mbox.signal[i] = mbox_reg; 2982 } 2983 } 2984 } 2985 2986 static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2987 struct intel_engine_cs *engine) 2988 { 2989 if (INTEL_GEN(dev_priv) >= 8) { 2990 engine->irq_get = gen8_ring_get_irq; 2991 engine->irq_put = gen8_ring_put_irq; 2992 engine->irq_seqno_barrier = gen6_seqno_barrier; 2993 } else if (INTEL_GEN(dev_priv) >= 6) { 2994 engine->irq_get = gen6_ring_get_irq; 2995 engine->irq_put = gen6_ring_put_irq; 2996 engine->irq_seqno_barrier = gen6_seqno_barrier; 2997 } else if (INTEL_GEN(dev_priv) >= 5) { 2998 engine->irq_get = gen5_ring_get_irq; 2999 engine->irq_put = gen5_ring_put_irq; 3000 } else if (INTEL_GEN(dev_priv) >= 3) { 3001 engine->irq_get = i9xx_ring_get_irq; 3002 engine->irq_put = i9xx_ring_put_irq; 3003 } else { 3004 engine->irq_get = i8xx_ring_get_irq; 3005 
engine->irq_put = i8xx_ring_put_irq; 3006 } 3007 } 3008 3009 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 3010 struct intel_engine_cs *engine) 3011 { 3012 engine->init_hw = init_ring_common; 3013 engine->write_tail = ring_write_tail; 3014 engine->get_seqno = ring_get_seqno; 3015 engine->set_seqno = ring_set_seqno; 3016 3017 engine->add_request = i9xx_add_request; 3018 if (INTEL_GEN(dev_priv) >= 6) 3019 engine->add_request = gen6_add_request; 3020 3021 if (INTEL_GEN(dev_priv) >= 8) 3022 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3023 else if (INTEL_GEN(dev_priv) >= 6) 3024 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3025 else if (INTEL_GEN(dev_priv) >= 4) 3026 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 3027 else if (IS_I830(dev_priv) || IS_845G(dev_priv)) 3028 engine->dispatch_execbuffer = i830_dispatch_execbuffer; 3029 else 3030 engine->dispatch_execbuffer = i915_dispatch_execbuffer; 3031 3032 intel_ring_init_irq(dev_priv, engine); 3033 intel_ring_init_semaphores(dev_priv, engine); 3034 } 3035 3036 int intel_init_render_ring_buffer(struct drm_device *dev) 3037 { 3038 struct drm_i915_private *dev_priv = dev->dev_private; 3039 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 3040 struct drm_i915_gem_object *obj; 3041 int ret; 3042 3043 engine->name = "render ring"; 3044 engine->id = RCS; 3045 engine->exec_id = I915_EXEC_RENDER; 3046 engine->hw_id = 0; 3047 engine->mmio_base = RENDER_RING_BASE; 3048 3049 intel_ring_default_vfuncs(dev_priv, engine); 3050 3051 if (INTEL_GEN(dev_priv) >= 8) { 3052 engine->init_context = intel_rcs_ctx_init; 3053 engine->add_request = gen8_render_add_request; 3054 engine->flush = gen8_render_ring_flush; 3055 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 3056 if (i915_semaphore_is_enabled(dev_priv)) 3057 engine->semaphore.signal = gen8_rcs_signal; 3058 } else if (INTEL_GEN(dev_priv) >= 6) { 3059 engine->init_context = intel_rcs_ctx_init; 3060 engine->flush = gen7_render_ring_flush; 3061 if (IS_GEN6(dev_priv)) 3062 engine->flush = gen6_render_ring_flush; 3063 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 3064 } else if (IS_GEN5(dev_priv)) { 3065 engine->add_request = pc_render_add_request; 3066 engine->flush = gen4_render_ring_flush; 3067 engine->get_seqno = pc_render_get_seqno; 3068 engine->set_seqno = pc_render_set_seqno; 3069 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | 3070 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 3071 } else { 3072 if (INTEL_GEN(dev_priv) < 4) 3073 engine->flush = gen2_render_ring_flush; 3074 else 3075 engine->flush = gen4_render_ring_flush; 3076 engine->irq_enable_mask = I915_USER_INTERRUPT; 3077 } 3078 3079 if (IS_HASWELL(dev_priv)) 3080 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 3081 3082 engine->init_hw = init_render_ring; 3083 engine->cleanup = render_ring_cleanup; 3084 3085 /* Workaround batchbuffer to combat CS tlb bug. 
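* (allocate and pin the scratch bo that i830_dispatch_execbuffer() blits batches into before executing them; its size is set by I830_WA_SIZE above)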
*/ 3086 if (HAS_BROKEN_CS_TLB(dev_priv)) { 3087 obj = i915_gem_object_create(dev, I830_WA_SIZE); 3088 if (IS_ERR(obj)) { 3089 DRM_ERROR("Failed to allocate batch bo\n"); 3090 return PTR_ERR(obj); 3091 } 3092 3093 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 3094 if (ret != 0) { 3095 drm_gem_object_unreference(&obj->base); 3096 DRM_ERROR("Failed to pin batch bo\n"); 3097 return ret; 3098 } 3099 3100 engine->scratch.obj = obj; 3101 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 3102 } 3103 3104 ret = intel_init_ring_buffer(dev, engine); 3105 if (ret) 3106 return ret; 3107 3108 if (INTEL_GEN(dev_priv) >= 5) { 3109 ret = intel_init_pipe_control(engine); 3110 if (ret) 3111 return ret; 3112 } 3113 3114 return 0; 3115 } 3116 3117 int intel_init_bsd_ring_buffer(struct drm_device *dev) 3118 { 3119 struct drm_i915_private *dev_priv = dev->dev_private; 3120 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 3121 3122 engine->name = "bsd ring"; 3123 engine->id = VCS; 3124 engine->exec_id = I915_EXEC_BSD; 3125 engine->hw_id = 1; 3126 3127 intel_ring_default_vfuncs(dev_priv, engine); 3128 3129 if (INTEL_GEN(dev_priv) >= 6) { 3130 engine->mmio_base = GEN6_BSD_RING_BASE; 3131 /* gen6 bsd needs a special wa for tail updates */ 3132 if (IS_GEN6(dev_priv)) 3133 engine->write_tail = gen6_bsd_ring_write_tail; 3134 engine->flush = gen6_bsd_ring_flush; 3135 if (INTEL_GEN(dev_priv) >= 8) 3136 engine->irq_enable_mask = 3137 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 3138 else 3139 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 3140 } else { 3141 engine->mmio_base = BSD_RING_BASE; 3142 engine->flush = bsd_ring_flush; 3143 if (IS_GEN5(dev_priv)) 3144 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 3145 else 3146 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 3147 } 3148 3149 return intel_init_ring_buffer(dev, engine); 3150 } 3151 3152 /** 3153 * Initialize the second BSD ring (e.g.
Broadwell GT3, Skylake GT3) 3154 */ 3155 int intel_init_bsd2_ring_buffer(struct drm_device *dev) 3156 { 3157 struct drm_i915_private *dev_priv = dev->dev_private; 3158 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 3159 3160 engine->name = "bsd2 ring"; 3161 engine->id = VCS2; 3162 engine->exec_id = I915_EXEC_BSD; 3163 engine->hw_id = 4; 3164 engine->mmio_base = GEN8_BSD2_RING_BASE; 3165 3166 intel_ring_default_vfuncs(dev_priv, engine); 3167 3168 engine->flush = gen6_bsd_ring_flush; 3169 engine->irq_enable_mask = 3170 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 3171 3172 return intel_init_ring_buffer(dev, engine); 3173 } 3174 3175 int intel_init_blt_ring_buffer(struct drm_device *dev) 3176 { 3177 struct drm_i915_private *dev_priv = dev->dev_private; 3178 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 3179 3180 engine->name = "blitter ring"; 3181 engine->id = BCS; 3182 engine->exec_id = I915_EXEC_BLT; 3183 engine->hw_id = 2; 3184 engine->mmio_base = BLT_RING_BASE; 3185 3186 intel_ring_default_vfuncs(dev_priv, engine); 3187 3188 engine->flush = gen6_ring_flush; 3189 if (INTEL_GEN(dev_priv) >= 8) 3190 engine->irq_enable_mask = 3191 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 3192 else 3193 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 3194 3195 return intel_init_ring_buffer(dev, engine); 3196 } 3197 3198 int intel_init_vebox_ring_buffer(struct drm_device *dev) 3199 { 3200 struct drm_i915_private *dev_priv = dev->dev_private; 3201 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 3202 3203 engine->name = "video enhancement ring"; 3204 engine->id = VECS; 3205 engine->exec_id = I915_EXEC_VEBOX; 3206 engine->hw_id = 3; 3207 engine->mmio_base = VEBOX_RING_BASE; 3208 3209 intel_ring_default_vfuncs(dev_priv, engine); 3210 3211 engine->flush = gen6_ring_flush; 3212 3213 if (INTEL_GEN(dev_priv) >= 8) { 3214 engine->irq_enable_mask = 3215 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3216 } else { 3217 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 3218 engine->irq_get = hsw_vebox_get_irq; 3219 engine->irq_put = hsw_vebox_put_irq; 3220 } 3221 3222 return intel_init_ring_buffer(dev, engine); 3223 } 3224 3225 int 3226 intel_ring_flush_all_caches(struct drm_i915_gem_request *req) 3227 { 3228 struct intel_engine_cs *engine = req->engine; 3229 int ret; 3230 3231 if (!engine->gpu_caches_dirty) 3232 return 0; 3233 3234 ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS); 3235 if (ret) 3236 return ret; 3237 3238 trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); 3239 3240 engine->gpu_caches_dirty = false; 3241 return 0; 3242 } 3243 3244 int 3245 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 3246 { 3247 struct intel_engine_cs *engine = req->engine; 3248 uint32_t flush_domains; 3249 int ret; 3250 3251 flush_domains = 0; 3252 if (engine->gpu_caches_dirty) 3253 flush_domains = I915_GEM_GPU_DOMAINS; 3254 3255 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3256 if (ret) 3257 return ret; 3258 3259 trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3260 3261 engine->gpu_caches_dirty = false; 3262 return 0; 3263 } 3264 3265 void 3266 intel_stop_engine(struct intel_engine_cs *engine) 3267 { 3268 int ret; 3269 3270 if (!intel_engine_initialized(engine)) 3271 return; 3272 3273 ret = intel_engine_idle(engine); 3274 if (ret) 3275 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 3276 engine->name, ret); 3277 3278 stop_ring(engine); 3279 } 3280
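/*
 * Usage sketch (illustrative only, not part of the driver): the emission
 * helpers above are used throughout this file in a begin/emit/advance
 * pattern. Assuming a caller that already holds a request, a minimal
 * two-dword emission would look like:
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(req->engine, MI_FLUSH);
 *	intel_ring_emit(req->engine, MI_NOOP);
 *	intel_ring_advance(req->engine);
 *
 * intel_ring_begin() waits (and wraps the tail if necessary) until the
 * requested dwords plus the request's reserved space fit in the ring; the
 * caller is then expected to emit exactly that many dwords, which is why
 * the flush routines above pad odd-length payloads with MI_NOOP.
 */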