/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

static void __intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	engine->write_tail(engine, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
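 *
 * In practice (see intel_emit_post_sync_nonzero_flush() below) that
 * means emitting two PIPE_CONTROLs back to back: a first one with only
 * CS_STALL | STALL_AT_SCOREBOARD set, followed by a second one with a
 * QW_WRITE post-sync op targeting the scratch page.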
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0); /* low dword */
	intel_ring_emit(engine, 0); /* high dword */
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *engine,
			    u32 value)
{
	struct drm_i915_private *dev_priv = engine->i915;
	I915_WRITE_TAIL(engine, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7.
		 * Only shut up gcc switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN2(dev_priv)) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	engine->write_tail(engine, 0);

	if (!IS_GEN2(dev_priv)) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ringbuffer *ringbuf = engine->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev_priv))
		intel_ring_setup_status_page(engine);
	else
		ring_setup_phys_status_page(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring.
	 * This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));
	I915_WRITE_HEAD(engine, 0);
	(void)I915_READ_HEAD(engine);

	I915_WRITE_CTL(engine,
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
			  I915_READ_START(engine),
			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->last_retired_head = -1;
	ringbuf->head = I915_READ_HEAD(engine);
	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
	intel_ring_update_space(ringbuf);

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void intel_fini_pipe_control(struct intel_engine_cs *engine)
{
	if (engine->scratch.obj == NULL)
		return;

	i915_gem_object_ggtt_unpin(engine->scratch.obj);
	drm_gem_object_unreference(&engine->scratch.obj->base);
	engine->scratch.obj = NULL;
}

int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	WARN_ON(engine->scratch.obj);

	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&engine->i915->drm, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		ret = PTR_ERR(obj);
		goto err;
	}

	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch.obj = obj;
	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, engine->scratch.gtt_offset);
	return 0;

err_unref:
	drm_gem_object_unreference(&engine->scratch.obj->base);
err:
	return ret;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(engine, w->reg[i].addr);
		intel_ring_emit(engine, w->reg[i].value);
	}
	intel_ring_emit(engine, MI_NOOP);

	intel_ring_advance(engine);

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;
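
	/* Worked example (illustrative): if subslice_7eu[i] == 0x4, i.e.
	 * only subslice 2 of slice i has 7 EUs, then ffs(0x4) - 1 == 2,
	 * so vals[i] == 3 - 2 == 1 and GEN9_IZ_HASHING(i, 1) is what gets
	 * programmed below.
	 */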
	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	}

	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}

	/* WaDisablePowerCompilerClockGating:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	/* WaBarrierPerformanceFixDisable:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
			   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:kbl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev_priv))
		return bdw_init_workarounds(engine);

	if (IS_CHERRYVIEW(dev_priv))
		return chv_init_workarounds(engine);

	if (IS_SKYLAKE(dev_priv))
		return skl_init_workarounds(engine);

	if (IS_BROXTON(dev_priv))
		return bxt_init_workarounds(engine);

	if (IS_KABYLAKE(dev_priv))
		return kbl_init_workarounds(engine);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(engine);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_CS_STALL);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller_req->seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller_req->seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
{
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *useless;
	enum intel_engine_id id;
	int ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(useless, dev_priv, id) {
		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];

		if (i915_mmio_reg_valid(mbox_reg)) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit_reg(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller_req->seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (engine->semaphore.signal)
		ret = engine->semaphore.signal(req, 4);
	else
		ret = intel_ring_begin(req, 4);

	if (ret)
		return ret;

	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
	intel_ring_emit(engine,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(engine, req->seqno);
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	__intel_ring_advance(engine);

	return 0;
}

static int
gen8_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (engine->semaphore.signal)
		ret = engine->semaphore.signal(req, 8);
	else
		ret = intel_ring_begin(req, 8);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_CS_STALL |
				 PIPE_CONTROL_QW_WRITE));
	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	/* We're thrashing one dword of HWS. */
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	intel_ring_emit(engine, MI_NOOP);
	__intel_ring_advance(engine);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
					      u32 seqno)
{
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	struct drm_i915_private *dev_priv = waiter_req->i915;
	u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, lower_32_bits(offset));
	intel_ring_emit(waiter, upper_32_bits(offset));
	intel_ring_advance(waiter);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = waiter_req->ctx->ppgtt;
	if (ppgtt && waiter_req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

static void
gen5_seqno_barrier(struct intel_engine_cs *ring)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_FLUSH);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);
	return 0;
}

static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
	intel_ring_emit(engine,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(engine, req->seqno);
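	/* The MI_STORE_DWORD_INDEX above writes the request's seqno into
	 * the hardware status page at I915_GEM_HWS_INDEX; the
	 * MI_USER_INTERRUPT emitted next then wakes any waiters polling
	 * on that slot.
	 */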
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	__intel_ring_advance(engine);

	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

static int
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(engine, offset);
	intel_ring_advance(engine);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cs_offset = engine->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(engine, cs_offset);
	intel_ring_emit(engine, 0xdeadbeef);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(engine,
				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(engine, cs_offset);
		intel_ring_emit(engine, 4096);
		intel_ring_emit(engine, offset);

		intel_ring_emit(engine, MI_FLUSH);
		intel_ring_emit(engine, MI_NOOP);
		intel_ring_advance(engine);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					  0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(engine);

	return 0;
}

static int
i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1893 0 : MI_BATCH_NON_SECURE)); 1894 intel_ring_advance(engine); 1895 1896 return 0; 1897 } 1898 1899 static void cleanup_phys_status_page(struct intel_engine_cs *engine) 1900 { 1901 struct drm_i915_private *dev_priv = engine->i915; 1902 1903 if (!dev_priv->status_page_dmah) 1904 return; 1905 1906 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); 1907 engine->status_page.page_addr = NULL; 1908 } 1909 1910 static void cleanup_status_page(struct intel_engine_cs *engine) 1911 { 1912 struct drm_i915_gem_object *obj; 1913 1914 obj = engine->status_page.obj; 1915 if (obj == NULL) 1916 return; 1917 1918 kunmap(sg_page(obj->pages->sgl)); 1919 i915_gem_object_ggtt_unpin(obj); 1920 drm_gem_object_unreference(&obj->base); 1921 engine->status_page.obj = NULL; 1922 } 1923 1924 static int init_status_page(struct intel_engine_cs *engine) 1925 { 1926 struct drm_i915_gem_object *obj = engine->status_page.obj; 1927 1928 if (obj == NULL) { 1929 unsigned flags; 1930 int ret; 1931 1932 obj = i915_gem_object_create(&engine->i915->drm, 4096); 1933 if (IS_ERR(obj)) { 1934 DRM_ERROR("Failed to allocate status page\n"); 1935 return PTR_ERR(obj); 1936 } 1937 1938 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1939 if (ret) 1940 goto err_unref; 1941 1942 flags = 0; 1943 if (!HAS_LLC(engine->i915)) 1944 /* On g33, we cannot place HWS above 256MiB, so 1945 * restrict its pinning to the low mappable arena. 1946 * Though this restriction is not documented for 1947 * gen4, gen5, or byt, they also behave similarly 1948 * and hang if the HWS is placed at the top of the 1949 * GTT. To generalise, it appears that all !llc 1950 * platforms have issues with us placing the HWS 1951 * above the mappable region (even though we never 1952 * actually map it). 1953 */ 1954 flags |= PIN_MAPPABLE; 1955 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); 1956 if (ret) { 1957 err_unref: 1958 drm_gem_object_unreference(&obj->base); 1959 return ret; 1960 } 1961 1962 engine->status_page.obj = obj; 1963 } 1964 1965 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1966 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1967 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 1968 1969 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1970 engine->name, engine->status_page.gfx_addr); 1971 1972 return 0; 1973 } 1974 1975 static int init_phys_status_page(struct intel_engine_cs *engine) 1976 { 1977 struct drm_i915_private *dev_priv = engine->i915; 1978 1979 if (!dev_priv->status_page_dmah) { 1980 dev_priv->status_page_dmah = 1981 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); 1982 if (!dev_priv->status_page_dmah) 1983 return -ENOMEM; 1984 } 1985 1986 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1987 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 1988 1989 return 0; 1990 } 1991 1992 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1993 { 1994 GEM_BUG_ON(ringbuf->vma == NULL); 1995 GEM_BUG_ON(ringbuf->virtual_start == NULL); 1996 1997 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) 1998 i915_gem_object_unpin_map(ringbuf->obj); 1999 else 2000 i915_vma_unpin_iomap(ringbuf->vma); 2001 ringbuf->virtual_start = NULL; 2002 2003 i915_gem_object_ggtt_unpin(ringbuf->obj); 2004 ringbuf->vma = NULL; 2005 } 2006 2007 int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, 2008 struct intel_ringbuffer *ringbuf) 2009 { 2010 struct drm_i915_gem_object *obj = ringbuf->obj; 2011 /* Ring wraparound at offset 0 sometimes hangs. No idea why.
*/ 2012 unsigned flags = PIN_OFFSET_BIAS | 4096; 2013 void *addr; 2014 int ret; 2015 2016 if (HAS_LLC(dev_priv) && !obj->stolen) { 2017 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); 2018 if (ret) 2019 return ret; 2020 2021 ret = i915_gem_object_set_to_cpu_domain(obj, true); 2022 if (ret) 2023 goto err_unpin; 2024 2025 addr = i915_gem_object_pin_map(obj); 2026 if (IS_ERR(addr)) { 2027 ret = PTR_ERR(addr); 2028 goto err_unpin; 2029 } 2030 } else { 2031 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 2032 flags | PIN_MAPPABLE); 2033 if (ret) 2034 return ret; 2035 2036 ret = i915_gem_object_set_to_gtt_domain(obj, true); 2037 if (ret) 2038 goto err_unpin; 2039 2040 /* Access through the GTT requires the device to be awake. */ 2041 assert_rpm_wakelock_held(dev_priv); 2042 2043 addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); 2044 if (IS_ERR(addr)) { 2045 ret = PTR_ERR(addr); 2046 goto err_unpin; 2047 } 2048 } 2049 2050 ringbuf->virtual_start = addr; 2051 ringbuf->vma = i915_gem_obj_to_ggtt(obj); 2052 return 0; 2053 2054 err_unpin: 2055 i915_gem_object_ggtt_unpin(obj); 2056 return ret; 2057 } 2058 2059 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2060 { 2061 drm_gem_object_unreference(&ringbuf->obj->base); 2062 ringbuf->obj = NULL; 2063 } 2064 2065 static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 2066 struct intel_ringbuffer *ringbuf) 2067 { 2068 struct drm_i915_gem_object *obj; 2069 2070 obj = NULL; 2071 if (!HAS_LLC(dev)) 2072 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2073 if (obj == NULL) 2074 obj = i915_gem_object_create(dev, ringbuf->size); 2075 if (IS_ERR(obj)) 2076 return PTR_ERR(obj); 2077 2078 /* mark ring buffers as read-only from GPU side by default */ 2079 obj->gt_ro = 1; 2080 2081 ringbuf->obj = obj; 2082 2083 return 0; 2084 } 2085 2086 struct intel_ringbuffer * 2087 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) 2088 { 2089 struct intel_ringbuffer *ring; 2090 int ret; 2091 2092 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2093 if (ring == NULL) { 2094 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", 2095 engine->name); 2096 return ERR_PTR(-ENOMEM); 2097 } 2098 2099 ring->engine = engine; 2100 list_add(&ring->link, &engine->buffers); 2101 2102 ring->size = size; 2103 /* Workaround an erratum on the i830 which causes a hang if 2104 * the TAIL pointer points to within the last 2 cachelines 2105 * of the buffer. 
2106 */ 2107 ring->effective_size = size; 2108 if (IS_I830(engine->i915) || IS_845G(engine->i915)) 2109 ring->effective_size -= 2 * CACHELINE_BYTES; 2110 2111 ring->last_retired_head = -1; 2112 intel_ring_update_space(ring); 2113 2114 ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); 2115 if (ret) { 2116 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2117 engine->name, ret); 2118 list_del(&ring->link); 2119 kfree(ring); 2120 return ERR_PTR(ret); 2121 } 2122 2123 return ring; 2124 } 2125 2126 void 2127 intel_ringbuffer_free(struct intel_ringbuffer *ring) 2128 { 2129 intel_destroy_ringbuffer_obj(ring); 2130 list_del(&ring->link); 2131 kfree(ring); 2132 } 2133 2134 static int intel_ring_context_pin(struct i915_gem_context *ctx, 2135 struct intel_engine_cs *engine) 2136 { 2137 struct intel_context *ce = &ctx->engine[engine->id]; 2138 int ret; 2139 2140 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 2141 2142 if (ce->pin_count++) 2143 return 0; 2144 2145 if (ce->state) { 2146 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); 2147 if (ret) 2148 goto error; 2149 } 2150 2151 /* The kernel context is only used as a placeholder for flushing the 2152 * active context. It is never used for submitting user rendering and 2153 * as such never requires the golden render context, and so we can skip 2154 * emitting it when we switch to the kernel context. This is required 2155 * as during eviction we cannot allocate and pin the renderstate in 2156 * order to initialise the context. 2157 */ 2158 if (ctx == ctx->i915->kernel_context) 2159 ce->initialised = true; 2160 2161 i915_gem_context_reference(ctx); 2162 return 0; 2163 2164 error: 2165 ce->pin_count = 0; 2166 return ret; 2167 } 2168 2169 static void intel_ring_context_unpin(struct i915_gem_context *ctx, 2170 struct intel_engine_cs *engine) 2171 { 2172 struct intel_context *ce = &ctx->engine[engine->id]; 2173 2174 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 2175 2176 if (--ce->pin_count) 2177 return; 2178 2179 if (ce->state) 2180 i915_gem_object_ggtt_unpin(ce->state); 2181 2182 i915_gem_context_unreference(ctx); 2183 } 2184 2185 static int intel_init_ring_buffer(struct drm_device *dev, 2186 struct intel_engine_cs *engine) 2187 { 2188 struct drm_i915_private *dev_priv = to_i915(dev); 2189 struct intel_ringbuffer *ringbuf; 2190 int ret; 2191 2192 WARN_ON(engine->buffer); 2193 2194 engine->i915 = dev_priv; 2195 INIT_LIST_HEAD(&engine->active_list); 2196 INIT_LIST_HEAD(&engine->request_list); 2197 INIT_LIST_HEAD(&engine->execlist_queue); 2198 INIT_LIST_HEAD(&engine->buffers); 2199 i915_gem_batch_pool_init(dev, &engine->batch_pool); 2200 memset(engine->semaphore.sync_seqno, 0, 2201 sizeof(engine->semaphore.sync_seqno)); 2202 2203 ret = intel_engine_init_breadcrumbs(engine); 2204 if (ret) 2205 goto error; 2206 2207 /* We may need to do things with the shrinker which 2208 * require us to immediately switch back to the default 2209 * context. This can cause a problem as pinning the 2210 * default context also requires GTT space which may not 2211 * be available. To avoid this we always pin the default 2212 * context. 
2213 */ 2214 ret = intel_ring_context_pin(dev_priv->kernel_context, engine); 2215 if (ret) 2216 goto error; 2217 2218 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2219 if (IS_ERR(ringbuf)) { 2220 ret = PTR_ERR(ringbuf); 2221 goto error; 2222 } 2223 engine->buffer = ringbuf; 2224 2225 if (I915_NEED_GFX_HWS(dev_priv)) { 2226 ret = init_status_page(engine); 2227 if (ret) 2228 goto error; 2229 } else { 2230 WARN_ON(engine->id != RCS); 2231 ret = init_phys_status_page(engine); 2232 if (ret) 2233 goto error; 2234 } 2235 2236 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); 2237 if (ret) { 2238 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2239 engine->name, ret); 2240 intel_destroy_ringbuffer_obj(ringbuf); 2241 goto error; 2242 } 2243 2244 ret = i915_cmd_parser_init_ring(engine); 2245 if (ret) 2246 goto error; 2247 2248 return 0; 2249 2250 error: 2251 intel_cleanup_engine(engine); 2252 return ret; 2253 } 2254 2255 void intel_cleanup_engine(struct intel_engine_cs *engine) 2256 { 2257 struct drm_i915_private *dev_priv; 2258 2259 if (!intel_engine_initialized(engine)) 2260 return; 2261 2262 dev_priv = engine->i915; 2263 2264 if (engine->buffer) { 2265 intel_stop_engine(engine); 2266 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2267 2268 intel_unpin_ringbuffer_obj(engine->buffer); 2269 intel_ringbuffer_free(engine->buffer); 2270 engine->buffer = NULL; 2271 } 2272 2273 if (engine->cleanup) 2274 engine->cleanup(engine); 2275 2276 if (I915_NEED_GFX_HWS(dev_priv)) { 2277 cleanup_status_page(engine); 2278 } else { 2279 WARN_ON(engine->id != RCS); 2280 cleanup_phys_status_page(engine); 2281 } 2282 2283 i915_cmd_parser_fini_ring(engine); 2284 i915_gem_batch_pool_fini(&engine->batch_pool); 2285 intel_engine_fini_breadcrumbs(engine); 2286 2287 intel_ring_context_unpin(dev_priv->kernel_context, engine); 2288 2289 engine->i915 = NULL; 2290 } 2291 2292 int intel_engine_idle(struct intel_engine_cs *engine) 2293 { 2294 struct drm_i915_gem_request *req; 2295 2296 /* Wait upon the last request to be completed */ 2297 if (list_empty(&engine->request_list)) 2298 return 0; 2299 2300 req = list_entry(engine->request_list.prev, 2301 struct drm_i915_gem_request, 2302 list); 2303 2304 /* Make sure we do not trigger any retires */ 2305 return __i915_wait_request(req, 2306 req->i915->mm.interruptible, 2307 NULL, NULL); 2308 } 2309 2310 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2311 { 2312 int ret; 2313 2314 /* Flush enough space to reduce the likelihood of waiting after 2315 * we start building the request - in which case we will just 2316 * have to repeat work. 2317 */ 2318 request->reserved_space += LEGACY_REQUEST_SIZE; 2319 2320 request->ringbuf = request->engine->buffer; 2321 2322 ret = intel_ring_begin(request, 0); 2323 if (ret) 2324 return ret; 2325 2326 request->reserved_space -= LEGACY_REQUEST_SIZE; 2327 return 0; 2328 } 2329 2330 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2331 { 2332 struct intel_ringbuffer *ringbuf = req->ringbuf; 2333 struct intel_engine_cs *engine = req->engine; 2334 struct drm_i915_gem_request *target; 2335 2336 intel_ring_update_space(ringbuf); 2337 if (ringbuf->space >= bytes) 2338 return 0; 2339 2340 /* 2341 * Space is reserved in the ringbuffer for finalising the request, 2342 * as that cannot be allowed to fail. 
During request finalisation, 2343 * reserved_space is set to 0 to stop the overallocation and the 2344 * assumption is that then we never need to wait (which has the 2345 * risk of failing with EINTR). 2346 * 2347 * See also i915_gem_request_alloc() and i915_add_request(). 2348 */ 2349 GEM_BUG_ON(!req->reserved_space); 2350 2351 list_for_each_entry(target, &engine->request_list, list) { 2352 unsigned space; 2353 2354 /* 2355 * The request queue is per-engine, so can contain requests 2356 * from multiple ringbuffers. Here, we must ignore any that 2357 * aren't from the ringbuffer we're considering. 2358 */ 2359 if (target->ringbuf != ringbuf) 2360 continue; 2361 2362 /* Would completion of this request free enough space? */ 2363 space = __intel_ring_space(target->postfix, ringbuf->tail, 2364 ringbuf->size); 2365 if (space >= bytes) 2366 break; 2367 } 2368 2369 if (WARN_ON(&target->list == &engine->request_list)) 2370 return -ENOSPC; 2371 2372 return i915_wait_request(target); 2373 } 2374 2375 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2376 { 2377 struct intel_ringbuffer *ringbuf = req->ringbuf; 2378 int remain_actual = ringbuf->size - ringbuf->tail; 2379 int remain_usable = ringbuf->effective_size - ringbuf->tail; 2380 int bytes = num_dwords * sizeof(u32); 2381 int total_bytes, wait_bytes; 2382 bool need_wrap = false; 2383 2384 total_bytes = bytes + req->reserved_space; 2385 2386 if (unlikely(bytes > remain_usable)) { 2387 /* 2388 * Not enough space for the basic request. So need to flush 2389 * out the remainder and then wait for base + reserved. 2390 */ 2391 wait_bytes = remain_actual + total_bytes; 2392 need_wrap = true; 2393 } else if (unlikely(total_bytes > remain_usable)) { 2394 /* 2395 * The base request will fit but the reserved space 2396 * falls off the end. So we don't need an immediate wrap 2397 * and only need to effectively wait for the reserved 2398 * size space from the start of ringbuffer. 2399 */ 2400 wait_bytes = remain_actual + req->reserved_space; 2401 } else { 2402 /* No wrapping required, just waiting. 
*/ 2403 wait_bytes = total_bytes; 2404 } 2405 2406 if (wait_bytes > ringbuf->space) { 2407 int ret = wait_for_space(req, wait_bytes); 2408 if (unlikely(ret)) 2409 return ret; 2410 2411 intel_ring_update_space(ringbuf); 2412 if (unlikely(ringbuf->space < wait_bytes)) 2413 return -EAGAIN; 2414 } 2415 2416 if (unlikely(need_wrap)) { 2417 GEM_BUG_ON(remain_actual > ringbuf->space); 2418 GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); 2419 2420 /* Fill the tail with MI_NOOP */ 2421 memset(ringbuf->virtual_start + ringbuf->tail, 2422 0, remain_actual); 2423 ringbuf->tail = 0; 2424 ringbuf->space -= remain_actual; 2425 } 2426 2427 ringbuf->space -= bytes; 2428 GEM_BUG_ON(ringbuf->space < 0); 2429 return 0; 2430 } 2431 2432 /* Align the ring tail to a cacheline boundary */ 2433 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2434 { 2435 struct intel_engine_cs *engine = req->engine; 2436 int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2437 int ret; 2438 2439 if (num_dwords == 0) 2440 return 0; 2441 2442 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2443 ret = intel_ring_begin(req, num_dwords); 2444 if (ret) 2445 return ret; 2446 2447 while (num_dwords--) 2448 intel_ring_emit(engine, MI_NOOP); 2449 2450 intel_ring_advance(engine); 2451 2452 return 0; 2453 } 2454 2455 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2456 { 2457 struct drm_i915_private *dev_priv = engine->i915; 2458 2459 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2460 * so long as the semaphore value in the register/page is greater 2461 * than the sync value), so whenever we reset the seqno, 2462 * so long as we reset the tracking semaphore value to 0, it will 2463 * always be before the next request's seqno. If we don't reset 2464 * the semaphore value, then when the seqno moves backwards all 2465 * future waits will complete instantly (causing rendering corruption). 2466 */ 2467 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 2468 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2469 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2470 if (HAS_VEBOX(dev_priv)) 2471 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); 2472 } 2473 if (dev_priv->semaphore_obj) { 2474 struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; 2475 struct page *page = i915_gem_object_get_dirty_page(obj, 0); 2476 void *semaphores = kmap(page); 2477 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 2478 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); 2479 kunmap(page); 2480 } 2481 memset(engine->semaphore.sync_seqno, 0, 2482 sizeof(engine->semaphore.sync_seqno)); 2483 2484 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 2485 if (engine->irq_seqno_barrier) 2486 engine->irq_seqno_barrier(engine); 2487 engine->last_submitted_seqno = seqno; 2488 2489 engine->hangcheck.seqno = seqno; 2490 2491 /* After manually advancing the seqno, fake the interrupt in case 2492 * there are any waiters for that seqno. 2493 */ 2494 rcu_read_lock(); 2495 intel_engine_wakeup(engine); 2496 rcu_read_unlock(); 2497 } 2498 2499 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2500 u32 value) 2501 { 2502 struct drm_i915_private *dev_priv = engine->i915; 2503 2504 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2505 2506 /* Every tail move must follow the sequence below */ 2507 2508 /* Disable notification that the ring is IDLE. The GT 2509 * will then assume that it is busy and bring it out of rc6. 
2510 */ 2511 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2512 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2513 2514 /* Clear the context id. Here be magic! */ 2515 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 2516 2517 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 2518 if (intel_wait_for_register_fw(dev_priv, 2519 GEN6_BSD_SLEEP_PSMI_CONTROL, 2520 GEN6_BSD_SLEEP_INDICATOR, 2521 0, 2522 50)) 2523 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2524 2525 /* Now that the ring is fully powered up, update the tail */ 2526 I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); 2527 POSTING_READ_FW(RING_TAIL(engine->mmio_base)); 2528 2529 /* Let the ring send IDLE messages to the GT again, 2530 * and so let it sleep to conserve power when idle. 2531 */ 2532 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2533 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2534 2535 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2536 } 2537 2538 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2539 u32 invalidate, u32 flush) 2540 { 2541 struct intel_engine_cs *engine = req->engine; 2542 uint32_t cmd; 2543 int ret; 2544 2545 ret = intel_ring_begin(req, 4); 2546 if (ret) 2547 return ret; 2548 2549 cmd = MI_FLUSH_DW; 2550 if (INTEL_GEN(req->i915) >= 8) 2551 cmd += 1; 2552 2553 /* We always require a command barrier so that subsequent 2554 * commands, such as breadcrumb interrupts, are strictly ordered 2555 * wrt the contents of the write cache being flushed to memory 2556 * (and thus being coherent from the CPU). 2557 */ 2558 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2559 2560 /* 2561 * Bspec vol 1c.5 - video engine command streamer: 2562 * "If ENABLED, all TLBs will be invalidated once the flush 2563 * operation is complete. This bit is only valid when the 2564 * Post-Sync Operation field is a value of 1h or 3h." 2565 */ 2566 if (invalidate & I915_GEM_GPU_DOMAINS) 2567 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2568 2569 intel_ring_emit(engine, cmd); 2570 intel_ring_emit(engine, 2571 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2572 if (INTEL_GEN(req->i915) >= 8) { 2573 intel_ring_emit(engine, 0); /* upper addr */ 2574 intel_ring_emit(engine, 0); /* value */ 2575 } else { 2576 intel_ring_emit(engine, 0); 2577 intel_ring_emit(engine, MI_NOOP); 2578 } 2579 intel_ring_advance(engine); 2580 return 0; 2581 } 2582 2583 static int 2584 gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2585 u64 offset, u32 len, 2586 unsigned dispatch_flags) 2587 { 2588 struct intel_engine_cs *engine = req->engine; 2589 bool ppgtt = USES_PPGTT(engine->dev) && 2590 !(dispatch_flags & I915_DISPATCH_SECURE); 2591 int ret; 2592 2593 ret = intel_ring_begin(req, 4); 2594 if (ret) 2595 return ret; 2596 2597 /* FIXME(BDW): Address space and security selectors. */ 2598 intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2599 (dispatch_flags & I915_DISPATCH_RS ? 
2600 MI_BATCH_RESOURCE_STREAMER : 0)); 2601 intel_ring_emit(engine, lower_32_bits(offset)); 2602 intel_ring_emit(engine, upper_32_bits(offset)); 2603 intel_ring_emit(engine, MI_NOOP); 2604 intel_ring_advance(engine); 2605 2606 return 0; 2607 } 2608 2609 static int 2610 hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2611 u64 offset, u32 len, 2612 unsigned dispatch_flags) 2613 { 2614 struct intel_engine_cs *engine = req->engine; 2615 int ret; 2616 2617 ret = intel_ring_begin(req, 2); 2618 if (ret) 2619 return ret; 2620 2621 intel_ring_emit(engine, 2622 MI_BATCH_BUFFER_START | 2623 (dispatch_flags & I915_DISPATCH_SECURE ? 2624 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2625 (dispatch_flags & I915_DISPATCH_RS ? 2626 MI_BATCH_RESOURCE_STREAMER : 0)); 2627 /* bit0-7 is the length on GEN6+ */ 2628 intel_ring_emit(engine, offset); 2629 intel_ring_advance(engine); 2630 2631 return 0; 2632 } 2633 2634 static int 2635 gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2636 u64 offset, u32 len, 2637 unsigned dispatch_flags) 2638 { 2639 struct intel_engine_cs *engine = req->engine; 2640 int ret; 2641 2642 ret = intel_ring_begin(req, 2); 2643 if (ret) 2644 return ret; 2645 2646 intel_ring_emit(engine, 2647 MI_BATCH_BUFFER_START | 2648 (dispatch_flags & I915_DISPATCH_SECURE ? 2649 0 : MI_BATCH_NON_SECURE_I965)); 2650 /* bit0-7 is the length on GEN6+ */ 2651 intel_ring_emit(engine, offset); 2652 intel_ring_advance(engine); 2653 2654 return 0; 2655 } 2656 2657 /* Blitter support (SandyBridge+) */ 2658 2659 static int gen6_ring_flush(struct drm_i915_gem_request *req, 2660 u32 invalidate, u32 flush) 2661 { 2662 struct intel_engine_cs *engine = req->engine; 2663 uint32_t cmd; 2664 int ret; 2665 2666 ret = intel_ring_begin(req, 4); 2667 if (ret) 2668 return ret; 2669 2670 cmd = MI_FLUSH_DW; 2671 if (INTEL_GEN(req->i915) >= 8) 2672 cmd += 1; 2673 2674 /* We always require a command barrier so that subsequent 2675 * commands, such as breadcrumb interrupts, are strictly ordered 2676 * wrt the contents of the write cache being flushed to memory 2677 * (and thus being coherent from the CPU). 2678 */ 2679 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2680 2681 /* 2682 * Bspec vol 1c.3 - blitter engine command streamer: 2683 * "If ENABLED, all TLBs will be invalidated once the flush 2684 * operation is complete. This bit is only valid when the 2685 * Post-Sync Operation field is a value of 1h or 3h." 2686 */ 2687 if (invalidate & I915_GEM_DOMAIN_RENDER) 2688 cmd |= MI_INVALIDATE_TLB; 2689 intel_ring_emit(engine, cmd); 2690 intel_ring_emit(engine, 2691 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2692 if (INTEL_GEN(req->i915) >= 8) { 2693 intel_ring_emit(engine, 0); /* upper addr */ 2694 intel_ring_emit(engine, 0); /* value */ 2695 } else { 2696 intel_ring_emit(engine, 0); 2697 intel_ring_emit(engine, MI_NOOP); 2698 } 2699 intel_ring_advance(engine); 2700 2701 return 0; 2702 } 2703 2704 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, 2705 struct intel_engine_cs *engine) 2706 { 2707 struct drm_i915_gem_object *obj; 2708 int ret, i; 2709 2710 if (!i915_semaphore_is_enabled(dev_priv)) 2711 return; 2712 2713 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { 2714 obj = i915_gem_object_create(&dev_priv->drm, 4096); 2715 if (IS_ERR(obj)) { 2716 DRM_ERROR("Failed to allocate semaphore bo. 
Disabling semaphores\n"); 2717 i915.semaphores = 0; 2718 } else { 2719 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2720 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); 2721 if (ret != 0) { 2722 drm_gem_object_unreference(&obj->base); 2723 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); 2724 i915.semaphores = 0; 2725 } else { 2726 dev_priv->semaphore_obj = obj; 2727 } 2728 } 2729 } 2730 2731 if (!i915_semaphore_is_enabled(dev_priv)) 2732 return; 2733 2734 if (INTEL_GEN(dev_priv) >= 8) { 2735 u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); 2736 2737 engine->semaphore.sync_to = gen8_ring_sync; 2738 engine->semaphore.signal = gen8_xcs_signal; 2739 2740 for (i = 0; i < I915_NUM_ENGINES; i++) { 2741 u64 ring_offset; 2742 2743 if (i != engine->id) 2744 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); 2745 else 2746 ring_offset = MI_SEMAPHORE_SYNC_INVALID; 2747 2748 engine->semaphore.signal_ggtt[i] = ring_offset; 2749 } 2750 } else if (INTEL_GEN(dev_priv) >= 6) { 2751 engine->semaphore.sync_to = gen6_ring_sync; 2752 engine->semaphore.signal = gen6_signal; 2753 2754 /* 2755 * The current semaphore is only applied on pre-gen8 2756 * platform. And there is no VCS2 ring on the pre-gen8 2757 * platform. So the semaphore between RCS and VCS2 is 2758 * initialized as INVALID. Gen8 will initialize the 2759 * sema between VCS2 and RCS later. 2760 */ 2761 for (i = 0; i < I915_NUM_ENGINES; i++) { 2762 static const struct { 2763 u32 wait_mbox; 2764 i915_reg_t mbox_reg; 2765 } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { 2766 [RCS] = { 2767 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, 2768 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, 2769 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, 2770 }, 2771 [VCS] = { 2772 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, 2773 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, 2774 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, 2775 }, 2776 [BCS] = { 2777 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, 2778 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, 2779 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, 2780 }, 2781 [VECS] = { 2782 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, 2783 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, 2784 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, 2785 }, 2786 }; 2787 u32 wait_mbox; 2788 i915_reg_t mbox_reg; 2789 2790 if (i == engine->id || i == VCS2) { 2791 wait_mbox = MI_SEMAPHORE_SYNC_INVALID; 2792 mbox_reg = GEN6_NOSYNC; 2793 } else { 2794 wait_mbox = sem_data[engine->id][i].wait_mbox; 2795 mbox_reg = sem_data[engine->id][i].mbox_reg; 2796 } 2797 2798 engine->semaphore.mbox.wait[i] = wait_mbox; 2799 engine->semaphore.mbox.signal[i] = mbox_reg; 2800 } 2801 } 2802 } 2803 2804 static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2805 struct intel_engine_cs *engine) 2806 { 2807 if (INTEL_GEN(dev_priv) >= 8) { 2808 engine->irq_enable = gen8_irq_enable; 2809 engine->irq_disable = gen8_irq_disable; 2810 engine->irq_seqno_barrier = gen6_seqno_barrier; 2811 } else if (INTEL_GEN(dev_priv) >= 6) { 2812 engine->irq_enable = gen6_irq_enable; 2813 engine->irq_disable = gen6_irq_disable; 2814 engine->irq_seqno_barrier = gen6_seqno_barrier; 2815 } else if 
(INTEL_GEN(dev_priv) >= 5) { 2816 engine->irq_enable = gen5_irq_enable; 2817 engine->irq_disable = gen5_irq_disable; 2818 engine->irq_seqno_barrier = gen5_seqno_barrier; 2819 } else if (INTEL_GEN(dev_priv) >= 3) { 2820 engine->irq_enable = i9xx_irq_enable; 2821 engine->irq_disable = i9xx_irq_disable; 2822 } else { 2823 engine->irq_enable = i8xx_irq_enable; 2824 engine->irq_disable = i8xx_irq_disable; 2825 } 2826 } 2827 2828 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2829 struct intel_engine_cs *engine) 2830 { 2831 engine->init_hw = init_ring_common; 2832 engine->write_tail = ring_write_tail; 2833 2834 engine->add_request = i9xx_add_request; 2835 if (INTEL_GEN(dev_priv) >= 6) 2836 engine->add_request = gen6_add_request; 2837 2838 if (INTEL_GEN(dev_priv) >= 8) 2839 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2840 else if (INTEL_GEN(dev_priv) >= 6) 2841 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2842 else if (INTEL_GEN(dev_priv) >= 4) 2843 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 2844 else if (IS_I830(dev_priv) || IS_845G(dev_priv)) 2845 engine->dispatch_execbuffer = i830_dispatch_execbuffer; 2846 else 2847 engine->dispatch_execbuffer = i915_dispatch_execbuffer; 2848 2849 intel_ring_init_irq(dev_priv, engine); 2850 intel_ring_init_semaphores(dev_priv, engine); 2851 } 2852 2853 int intel_init_render_ring_buffer(struct drm_device *dev) 2854 { 2855 struct drm_i915_private *dev_priv = to_i915(dev); 2856 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 2857 int ret; 2858 2859 engine->name = "render ring"; 2860 engine->id = RCS; 2861 engine->exec_id = I915_EXEC_RENDER; 2862 engine->hw_id = 0; 2863 engine->mmio_base = RENDER_RING_BASE; 2864 2865 intel_ring_default_vfuncs(dev_priv, engine); 2866 2867 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2868 if (HAS_L3_DPF(dev_priv)) 2869 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2870 2871 if (INTEL_GEN(dev_priv) >= 8) { 2872 engine->init_context = intel_rcs_ctx_init; 2873 engine->add_request = gen8_render_add_request; 2874 engine->flush = gen8_render_ring_flush; 2875 if (i915_semaphore_is_enabled(dev_priv)) 2876 engine->semaphore.signal = gen8_rcs_signal; 2877 } else if (INTEL_GEN(dev_priv) >= 6) { 2878 engine->init_context = intel_rcs_ctx_init; 2879 engine->flush = gen7_render_ring_flush; 2880 if (IS_GEN6(dev_priv)) 2881 engine->flush = gen6_render_ring_flush; 2882 } else if (IS_GEN5(dev_priv)) { 2883 engine->flush = gen4_render_ring_flush; 2884 } else { 2885 if (INTEL_GEN(dev_priv) < 4) 2886 engine->flush = gen2_render_ring_flush; 2887 else 2888 engine->flush = gen4_render_ring_flush; 2889 engine->irq_enable_mask = I915_USER_INTERRUPT; 2890 } 2891 2892 if (IS_HASWELL(dev_priv)) 2893 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2894 2895 engine->init_hw = init_render_ring; 2896 engine->cleanup = render_ring_cleanup; 2897 2898 ret = intel_init_ring_buffer(dev, engine); 2899 if (ret) 2900 return ret; 2901 2902 if (INTEL_GEN(dev_priv) >= 6) { 2903 ret = intel_init_pipe_control(engine, 4096); 2904 if (ret) 2905 return ret; 2906 } else if (HAS_BROKEN_CS_TLB(dev_priv)) { 2907 ret = intel_init_pipe_control(engine, I830_WA_SIZE); 2908 if (ret) 2909 return ret; 2910 } 2911 2912 return 0; 2913 } 2914 2915 int intel_init_bsd_ring_buffer(struct drm_device *dev) 2916 { 2917 struct drm_i915_private *dev_priv = to_i915(dev); 2918 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2919 2920 engine->name = "bsd ring"; 2921 engine->id = VCS; 
2922 engine->exec_id = I915_EXEC_BSD; 2923 engine->hw_id = 1; 2924 2925 intel_ring_default_vfuncs(dev_priv, engine); 2926 2927 if (INTEL_GEN(dev_priv) >= 6) { 2928 engine->mmio_base = GEN6_BSD_RING_BASE; 2929 /* gen6 bsd needs a special wa for tail updates */ 2930 if (IS_GEN6(dev_priv)) 2931 engine->write_tail = gen6_bsd_ring_write_tail; 2932 engine->flush = gen6_bsd_ring_flush; 2933 if (INTEL_GEN(dev_priv) >= 8) 2934 engine->irq_enable_mask = 2935 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2936 else 2937 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2938 } else { 2939 engine->mmio_base = BSD_RING_BASE; 2940 engine->flush = bsd_ring_flush; 2941 if (IS_GEN5(dev_priv)) 2942 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2943 else 2944 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2945 } 2946 2947 return intel_init_ring_buffer(dev, engine); 2948 } 2949 2950 /** 2951 * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3) 2952 */ 2953 int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2954 { 2955 struct drm_i915_private *dev_priv = to_i915(dev); 2956 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 2957 2958 engine->name = "bsd2 ring"; 2959 engine->id = VCS2; 2960 engine->exec_id = I915_EXEC_BSD; 2961 engine->hw_id = 4; 2962 engine->mmio_base = GEN8_BSD2_RING_BASE; 2963 2964 intel_ring_default_vfuncs(dev_priv, engine); 2965 2966 engine->flush = gen6_bsd_ring_flush; 2967 engine->irq_enable_mask = 2968 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 2969 2970 return intel_init_ring_buffer(dev, engine); 2971 } 2972 2973 int intel_init_blt_ring_buffer(struct drm_device *dev) 2974 { 2975 struct drm_i915_private *dev_priv = to_i915(dev); 2976 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 2977 2978 engine->name = "blitter ring"; 2979 engine->id = BCS; 2980 engine->exec_id = I915_EXEC_BLT; 2981 engine->hw_id = 2; 2982 engine->mmio_base = BLT_RING_BASE; 2983 2984 intel_ring_default_vfuncs(dev_priv, engine); 2985 2986 engine->flush = gen6_ring_flush; 2987 if (INTEL_GEN(dev_priv) >= 8) 2988 engine->irq_enable_mask = 2989 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2990 else 2991 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2992 2993 return intel_init_ring_buffer(dev, engine); 2994 } 2995 2996 int intel_init_vebox_ring_buffer(struct drm_device *dev) 2997 { 2998 struct drm_i915_private *dev_priv = to_i915(dev); 2999 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 3000 3001 engine->name = "video enhancement ring"; 3002 engine->id = VECS; 3003 engine->exec_id = I915_EXEC_VEBOX; 3004 engine->hw_id = 3; 3005 engine->mmio_base = VEBOX_RING_BASE; 3006 3007 intel_ring_default_vfuncs(dev_priv, engine); 3008 3009 engine->flush = gen6_ring_flush; 3010 3011 if (INTEL_GEN(dev_priv) >= 8) { 3012 engine->irq_enable_mask = 3013 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3014 } else { 3015 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 3016 engine->irq_enable = hsw_vebox_irq_enable; 3017 engine->irq_disable = hsw_vebox_irq_disable; 3018 } 3019 3020 return intel_init_ring_buffer(dev, engine); 3021 } 3022 3023 int 3024 intel_ring_flush_all_caches(struct drm_i915_gem_request *req) 3025 { 3026 struct intel_engine_cs *engine = req->engine; 3027 int ret; 3028 3029 if (!engine->gpu_caches_dirty) 3030 return 0; 3031 3032 ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS); 3033 if (ret) 3034 return ret; 3035 3036 trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); 3037 3038 engine->gpu_caches_dirty = false; 3039 return 0; 3040 } 3041 3042 int 3043 
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 3044 { 3045 struct intel_engine_cs *engine = req->engine; 3046 uint32_t flush_domains; 3047 int ret; 3048 3049 flush_domains = 0; 3050 if (engine->gpu_caches_dirty) 3051 flush_domains = I915_GEM_GPU_DOMAINS; 3052 3053 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3054 if (ret) 3055 return ret; 3056 3057 trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3058 3059 engine->gpu_caches_dirty = false; 3060 return 0; 3061 } 3062 3063 void 3064 intel_stop_engine(struct intel_engine_cs *engine) 3065 { 3066 int ret; 3067 3068 if (!intel_engine_initialized(engine)) 3069 return; 3070 3071 ret = intel_engine_idle(engine); 3072 if (ret) 3073 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 3074 engine->name, ret); 3075 3076 stop_ring(engine); 3077 } 3078
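/*
 * Illustrative sketch, not driver code: a standalone model of the wait/wrap
 * decision made in intel_ring_begin() above, kept under #if 0 so it is never
 * compiled.  The names below (struct ring_model, model_begin) are invented
 * for this example only; the real logic operates on struct intel_ringbuffer.
 */
#if 0
struct ring_model {
	int size;		/* total ring size in bytes */
	int effective_size;	/* size minus the i830/845 tail workaround */
	int tail;		/* current write offset */
	int reserved_space;	/* bytes held back for request finalisation */
};

/*
 * Returns how many bytes must be free before emitting @bytes, and sets
 * *need_wrap when the request itself cannot fit before the end of the
 * effective ring and the remaining tail must first be padded with MI_NOOPs.
 */
static int model_begin(const struct ring_model *r, int bytes, bool *need_wrap)
{
	int remain_actual = r->size - r->tail;
	int remain_usable = r->effective_size - r->tail;
	int total_bytes = bytes + r->reserved_space;

	*need_wrap = false;

	if (bytes > remain_usable) {
		/* Request does not fit: pad to the end of the ring, then
		 * wait for the padding plus the request and reservation.
		 */
		*need_wrap = true;
		return remain_actual + total_bytes;
	} else if (total_bytes > remain_usable) {
		/* Request fits, but the reservation spills past the end:
		 * wait as if the reservation restarted at offset 0.
		 */
		return remain_actual + r->reserved_space;
	}

	/* Everything fits before the end: just wait for total_bytes. */
	return total_bytes;
}

/* Example with toy numbers: size = effective_size = 4096, tail = 4000,
 * reserved_space = 160, bytes = 64.  bytes (64) <= remain_usable (96) so no
 * wrap, but total_bytes (224) > 96, so we wait for remain_actual (96) +
 * reserved_space (160) = 256 bytes of space.
 */
#endif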