/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Free space between tail and head, accounting for wrap-around, less the
 * I915_RING_FREE_SPACE reserve.
 */
int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

bool intel_engine_stopped(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}

static void __intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_engine_stopped(engine))
		return;
	engine->write_tail(engine, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = engine->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(engine, cmd);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
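 *
 * The function below therefore emits two PIPE_CONTROLs: first a CS stall
 * with stall-at-scoreboard, and then the post-sync (QW write) flush to the
 * scratch page.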
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0); /* low dword */
	intel_ring_emit(engine, 0); /* high dword */
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
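		 * The QW write below targets the per-engine scratch page
		 * through the global GTT.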
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = 0;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
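		 * Note that the helper's return value is not checked here;
		 * if its intel_ring_begin() fails the extra flush is simply
		 * skipped.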
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(engine, flags);
	intel_ring_emit(engine, scratch_addr);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_emit(engine, 0);
	intel_ring_advance(engine);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *engine,
			    u32 value)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	I915_WRITE_TAIL(engine, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(engine->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_INFO(engine->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(engine->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
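	 * Each engine therefore has its own HWS_PGA register on gen7,
	 * selected by the switch below.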
	 */
	if (IS_GEN7(dev)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(engine->dev)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);

	if (!IS_GEN2(engine->dev)) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
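			 * If head == tail the ring really is empty, so we
			 * only report failure when work is still pending.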
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	engine->write_tail(engine, 0);

	if (!IS_GEN2(engine->dev)) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = engine->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(engine);
	else
		ring_setup_phys_status_page(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));
	I915_WRITE_HEAD(engine, 0);
	(void)I915_READ_HEAD(engine);

	I915_WRITE_CTL(engine,
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
			  I915_READ_START(engine),
			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->last_retired_head = -1;
	ringbuf->head = I915_READ_HEAD(engine);
	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
	intel_ring_update_space(ringbuf);

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;

	if (engine->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(engine->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(engine->scratch.obj);
	}

	drm_gem_object_unreference(&engine->scratch.obj->base);
	engine->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *engine)
{
	int ret;

	WARN_ON(engine->scratch.obj);

	engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
	if (engine->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(engine->scratch.obj,
					      I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
	engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
	if (engine->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, engine->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
	drm_gem_object_unreference(&engine->scratch.obj->base);
err:
	return ret;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(engine, w->reg[i].addr);
		intel_ring_emit(engine, w->reg[i].value);
	}
	intel_ring_emit(engine, MI_NOOP);

	intel_ring_advance(engine);

	engine->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
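	 *
	 * GEN7_GT_MODE is a masked register, so the field update below only
	 * touches the WIZ hashing bits.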
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	}

	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}

	/* WaDisablePowerCompilerClockGating:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	/* WaBarrierPerformanceFixDisable:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	int ret;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaInsertDummyPushConstPs:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:kbl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaInsertDummyPushConstPs:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(engine);

	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(engine);

	if (IS_SKYLAKE(dev))
		return skl_init_workarounds(engine);

	if (IS_BROXTON(dev))
		return bxt_init_workarounds(engine);

	if (IS_KABYLAKE(dev_priv))
		return kbl_init_workarounds(engine);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
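		 *
		 * The write below therefore disables CM0_STC_EVICT_DISABLE_LRA_SNB.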
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(engine);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
{
	struct intel_engine_cs *signaller = signaller_req->engine;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	enum intel_engine_id id;
	int ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_engine_id(useless, dev_priv, id) {
		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];

		if (i915_mmio_reg_valid(mbox_reg)) {
			u32 seqno = i915_gem_request_get_seqno(signaller_req);

			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit_reg(signaller, mbox_reg);
			intel_ring_emit(signaller, seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (engine->semaphore.signal)
		ret = engine->semaphore.signal(req, 4);
	else
		ret = intel_ring_begin(req, 4);

	if (ret)
		return ret;

	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
	intel_ring_emit(engine,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, MI_USER_INTERRUPT);
	__intel_ring_advance(engine);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->engine;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
			PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
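	 *
	 * The scratch writes below are spaced two cachelines apart so that
	 * each flush lands in its own cacheline.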
	 */
	ret = intel_ring_begin(req, 32);
	if (ret)
		return ret;

	intel_ring_emit(engine,
			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(engine,
			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, 0);
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(engine, scratch_addr);

	intel_ring_emit(engine,
			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(engine,
			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
	intel_ring_emit(engine, 0);
	__intel_ring_advance(engine);

	return 0;
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
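	 *
	 * The register read below is therefore done under uncore.lock with
	 * interrupts disabled.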
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static u32
ring_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *engine)
{
	return engine->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	engine->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~engine->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		dev_priv->irq_mask |= engine->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~engine->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
dev->dev_private; 1799 unsigned long flags; 1800 1801 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1802 if (--engine->irq_refcount == 0) { 1803 dev_priv->irq_mask |= engine->irq_enable_mask; 1804 I915_WRITE16(IMR, dev_priv->irq_mask); 1805 POSTING_READ16(IMR); 1806 } 1807 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1808 } 1809 1810 static int 1811 bsd_ring_flush(struct drm_i915_gem_request *req, 1812 u32 invalidate_domains, 1813 u32 flush_domains) 1814 { 1815 struct intel_engine_cs *engine = req->engine; 1816 int ret; 1817 1818 ret = intel_ring_begin(req, 2); 1819 if (ret) 1820 return ret; 1821 1822 intel_ring_emit(engine, MI_FLUSH); 1823 intel_ring_emit(engine, MI_NOOP); 1824 intel_ring_advance(engine); 1825 return 0; 1826 } 1827 1828 static int 1829 i9xx_add_request(struct drm_i915_gem_request *req) 1830 { 1831 struct intel_engine_cs *engine = req->engine; 1832 int ret; 1833 1834 ret = intel_ring_begin(req, 4); 1835 if (ret) 1836 return ret; 1837 1838 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1839 intel_ring_emit(engine, 1840 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1841 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1842 intel_ring_emit(engine, MI_USER_INTERRUPT); 1843 __intel_ring_advance(engine); 1844 1845 return 0; 1846 } 1847 1848 static bool 1849 gen6_ring_get_irq(struct intel_engine_cs *engine) 1850 { 1851 struct drm_device *dev = engine->dev; 1852 struct drm_i915_private *dev_priv = dev->dev_private; 1853 unsigned long flags; 1854 1855 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1856 return false; 1857 1858 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1859 if (engine->irq_refcount++ == 0) { 1860 if (HAS_L3_DPF(dev) && engine->id == RCS) 1861 I915_WRITE_IMR(engine, 1862 ~(engine->irq_enable_mask | 1863 GT_PARITY_ERROR(dev))); 1864 else 1865 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1866 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1867 } 1868 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1869 1870 return true; 1871 } 1872 1873 static void 1874 gen6_ring_put_irq(struct intel_engine_cs *engine) 1875 { 1876 struct drm_device *dev = engine->dev; 1877 struct drm_i915_private *dev_priv = dev->dev_private; 1878 unsigned long flags; 1879 1880 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1881 if (--engine->irq_refcount == 0) { 1882 if (HAS_L3_DPF(dev) && engine->id == RCS) 1883 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); 1884 else 1885 I915_WRITE_IMR(engine, ~0); 1886 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1887 } 1888 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1889 } 1890 1891 static bool 1892 hsw_vebox_get_irq(struct intel_engine_cs *engine) 1893 { 1894 struct drm_device *dev = engine->dev; 1895 struct drm_i915_private *dev_priv = dev->dev_private; 1896 unsigned long flags; 1897 1898 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1899 return false; 1900 1901 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1902 if (engine->irq_refcount++ == 0) { 1903 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1904 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); 1905 } 1906 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1907 1908 return true; 1909 } 1910 1911 static void 1912 hsw_vebox_put_irq(struct intel_engine_cs *engine) 1913 { 1914 struct drm_device *dev = engine->dev; 1915 struct drm_i915_private *dev_priv = dev->dev_private; 1916 unsigned long flags; 1917 1918 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1919 if (--engine->irq_refcount == 0) { 1920 I915_WRITE_IMR(engine, ~0); 1921 
gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); 1922 } 1923 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1924 } 1925 1926 static bool 1927 gen8_ring_get_irq(struct intel_engine_cs *engine) 1928 { 1929 struct drm_device *dev = engine->dev; 1930 struct drm_i915_private *dev_priv = dev->dev_private; 1931 unsigned long flags; 1932 1933 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1934 return false; 1935 1936 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1937 if (engine->irq_refcount++ == 0) { 1938 if (HAS_L3_DPF(dev) && engine->id == RCS) { 1939 I915_WRITE_IMR(engine, 1940 ~(engine->irq_enable_mask | 1941 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1942 } else { 1943 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1944 } 1945 POSTING_READ(RING_IMR(engine->mmio_base)); 1946 } 1947 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1948 1949 return true; 1950 } 1951 1952 static void 1953 gen8_ring_put_irq(struct intel_engine_cs *engine) 1954 { 1955 struct drm_device *dev = engine->dev; 1956 struct drm_i915_private *dev_priv = dev->dev_private; 1957 unsigned long flags; 1958 1959 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1960 if (--engine->irq_refcount == 0) { 1961 if (HAS_L3_DPF(dev) && engine->id == RCS) { 1962 I915_WRITE_IMR(engine, 1963 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1964 } else { 1965 I915_WRITE_IMR(engine, ~0); 1966 } 1967 POSTING_READ(RING_IMR(engine->mmio_base)); 1968 } 1969 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1970 } 1971 1972 static int 1973 i965_dispatch_execbuffer(struct drm_i915_gem_request *req, 1974 u64 offset, u32 length, 1975 unsigned dispatch_flags) 1976 { 1977 struct intel_engine_cs *engine = req->engine; 1978 int ret; 1979 1980 ret = intel_ring_begin(req, 2); 1981 if (ret) 1982 return ret; 1983 1984 intel_ring_emit(engine, 1985 MI_BATCH_BUFFER_START | 1986 MI_BATCH_GTT | 1987 (dispatch_flags & I915_DISPATCH_SECURE ? 1988 0 : MI_BATCH_NON_SECURE_I965)); 1989 intel_ring_emit(engine, offset); 1990 intel_ring_advance(engine); 1991 1992 return 0; 1993 } 1994 1995 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 1996 #define I830_BATCH_LIMIT (256*1024) 1997 #define I830_TLB_ENTRIES (2) 1998 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 1999 static int 2000 i830_dispatch_execbuffer(struct drm_i915_gem_request *req, 2001 u64 offset, u32 len, 2002 unsigned dispatch_flags) 2003 { 2004 struct intel_engine_cs *engine = req->engine; 2005 u32 cs_offset = engine->scratch.gtt_offset; 2006 int ret; 2007 2008 ret = intel_ring_begin(req, 6); 2009 if (ret) 2010 return ret; 2011 2012 /* Evict the invalid PTE TLBs */ 2013 intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA); 2014 intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); 2015 intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */ 2016 intel_ring_emit(engine, cs_offset); 2017 intel_ring_emit(engine, 0xdeadbeef); 2018 intel_ring_emit(engine, MI_NOOP); 2019 intel_ring_advance(engine); 2020 2021 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 2022 if (len > I830_BATCH_LIMIT) 2023 return -ENOSPC; 2024 2025 ret = intel_ring_begin(req, 6 + 2); 2026 if (ret) 2027 return ret; 2028 2029 /* Blit the batch (which now has all relocs applied) to the 2030 * stable batch scratch bo area (so that the CS never 2031 * stumbles over its tlb invalidation bug) ... 
2032 */ 2033 intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); 2034 intel_ring_emit(engine, 2035 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); 2036 intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096); 2037 intel_ring_emit(engine, cs_offset); 2038 intel_ring_emit(engine, 4096); 2039 intel_ring_emit(engine, offset); 2040 2041 intel_ring_emit(engine, MI_FLUSH); 2042 intel_ring_emit(engine, MI_NOOP); 2043 intel_ring_advance(engine); 2044 2045 /* ... and execute it. */ 2046 offset = cs_offset; 2047 } 2048 2049 ret = intel_ring_begin(req, 2); 2050 if (ret) 2051 return ret; 2052 2053 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 2054 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 2055 0 : MI_BATCH_NON_SECURE)); 2056 intel_ring_advance(engine); 2057 2058 return 0; 2059 } 2060 2061 static int 2062 i915_dispatch_execbuffer(struct drm_i915_gem_request *req, 2063 u64 offset, u32 len, 2064 unsigned dispatch_flags) 2065 { 2066 struct intel_engine_cs *engine = req->engine; 2067 int ret; 2068 2069 ret = intel_ring_begin(req, 2); 2070 if (ret) 2071 return ret; 2072 2073 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 2074 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 2075 0 : MI_BATCH_NON_SECURE)); 2076 intel_ring_advance(engine); 2077 2078 return 0; 2079 } 2080 2081 static void cleanup_phys_status_page(struct intel_engine_cs *engine) 2082 { 2083 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2084 2085 if (!dev_priv->status_page_dmah) 2086 return; 2087 2088 drm_pci_free(engine->dev, dev_priv->status_page_dmah); 2089 engine->status_page.page_addr = NULL; 2090 } 2091 2092 static void cleanup_status_page(struct intel_engine_cs *engine) 2093 { 2094 struct drm_i915_gem_object *obj; 2095 2096 obj = engine->status_page.obj; 2097 if (obj == NULL) 2098 return; 2099 2100 kunmap(sg_page(obj->pages->sgl)); 2101 i915_gem_object_ggtt_unpin(obj); 2102 drm_gem_object_unreference(&obj->base); 2103 engine->status_page.obj = NULL; 2104 } 2105 2106 static int init_status_page(struct intel_engine_cs *engine) 2107 { 2108 struct drm_i915_gem_object *obj = engine->status_page.obj; 2109 2110 if (obj == NULL) { 2111 unsigned flags; 2112 int ret; 2113 2114 obj = i915_gem_alloc_object(engine->dev, 4096); 2115 if (obj == NULL) { 2116 DRM_ERROR("Failed to allocate status page\n"); 2117 return -ENOMEM; 2118 } 2119 2120 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2121 if (ret) 2122 goto err_unref; 2123 2124 flags = 0; 2125 if (!HAS_LLC(engine->dev)) 2126 /* On g33, we cannot place HWS above 256MiB, so 2127 * restrict its pinning to the low mappable arena. 2128 * Though this restriction is not documented for 2129 * gen4, gen5, or byt, they also behave similarly 2130 * and hang if the HWS is placed at the top of the 2131 * GTT. To generalise, it appears that all !llc 2132 * platforms have issues with us placing the HWS 2133 * above the mappable region (even though we never 2134 * actually map it). 
2135 */ 2136 flags |= PIN_MAPPABLE; 2137 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); 2138 if (ret) { 2139 err_unref: 2140 drm_gem_object_unreference(&obj->base); 2141 return ret; 2142 } 2143 2144 engine->status_page.obj = obj; 2145 } 2146 2147 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 2148 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 2149 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 2150 2151 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 2152 engine->name, engine->status_page.gfx_addr); 2153 2154 return 0; 2155 } 2156 2157 static int init_phys_status_page(struct intel_engine_cs *engine) 2158 { 2159 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2160 2161 if (!dev_priv->status_page_dmah) { 2162 dev_priv->status_page_dmah = 2163 drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); 2164 if (!dev_priv->status_page_dmah) 2165 return -ENOMEM; 2166 } 2167 2168 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 2169 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 2170 2171 return 0; 2172 } 2173 2174 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2175 { 2176 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) 2177 i915_gem_object_unpin_map(ringbuf->obj); 2178 else 2179 iounmap(ringbuf->virtual_start); 2180 ringbuf->virtual_start = NULL; 2181 ringbuf->vma = NULL; 2182 i915_gem_object_ggtt_unpin(ringbuf->obj); 2183 } 2184 2185 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 2186 struct intel_ringbuffer *ringbuf) 2187 { 2188 struct drm_i915_private *dev_priv = to_i915(dev); 2189 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2190 struct drm_i915_gem_object *obj = ringbuf->obj; 2191 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2192 unsigned flags = PIN_OFFSET_BIAS | 4096; 2193 void *addr; 2194 int ret; 2195 2196 if (HAS_LLC(dev_priv) && !obj->stolen) { 2197 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); 2198 if (ret) 2199 return ret; 2200 2201 ret = i915_gem_object_set_to_cpu_domain(obj, true); 2202 if (ret) 2203 goto err_unpin; 2204 2205 addr = i915_gem_object_pin_map(obj); 2206 if (IS_ERR(addr)) { 2207 ret = PTR_ERR(addr); 2208 goto err_unpin; 2209 } 2210 } else { 2211 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 2212 flags | PIN_MAPPABLE); 2213 if (ret) 2214 return ret; 2215 2216 ret = i915_gem_object_set_to_gtt_domain(obj, true); 2217 if (ret) 2218 goto err_unpin; 2219 2220 /* Access through the GTT requires the device to be awake. 
*/ 2221 assert_rpm_wakelock_held(dev_priv); 2222 2223 addr = ioremap_wc(ggtt->mappable_base + 2224 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 2225 if (addr == NULL) { 2226 ret = -ENOMEM; 2227 goto err_unpin; 2228 } 2229 } 2230 2231 ringbuf->virtual_start = addr; 2232 ringbuf->vma = i915_gem_obj_to_ggtt(obj); 2233 return 0; 2234 2235 err_unpin: 2236 i915_gem_object_ggtt_unpin(obj); 2237 return ret; 2238 } 2239 2240 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2241 { 2242 drm_gem_object_unreference(&ringbuf->obj->base); 2243 ringbuf->obj = NULL; 2244 } 2245 2246 static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 2247 struct intel_ringbuffer *ringbuf) 2248 { 2249 struct drm_i915_gem_object *obj; 2250 2251 obj = NULL; 2252 if (!HAS_LLC(dev)) 2253 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2254 if (obj == NULL) 2255 obj = i915_gem_alloc_object(dev, ringbuf->size); 2256 if (obj == NULL) 2257 return -ENOMEM; 2258 2259 /* mark ring buffers as read-only from GPU side by default */ 2260 obj->gt_ro = 1; 2261 2262 ringbuf->obj = obj; 2263 2264 return 0; 2265 } 2266 2267 struct intel_ringbuffer * 2268 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) 2269 { 2270 struct intel_ringbuffer *ring; 2271 int ret; 2272 2273 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2274 if (ring == NULL) { 2275 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", 2276 engine->name); 2277 return ERR_PTR(-ENOMEM); 2278 } 2279 2280 ring->engine = engine; 2281 list_add(&ring->link, &engine->buffers); 2282 2283 ring->size = size; 2284 /* Workaround an erratum on the i830 which causes a hang if 2285 * the TAIL pointer points to within the last 2 cachelines 2286 * of the buffer. 2287 */ 2288 ring->effective_size = size; 2289 if (IS_I830(engine->dev) || IS_845G(engine->dev)) 2290 ring->effective_size -= 2 * CACHELINE_BYTES; 2291 2292 ring->last_retired_head = -1; 2293 intel_ring_update_space(ring); 2294 2295 ret = intel_alloc_ringbuffer_obj(engine->dev, ring); 2296 if (ret) { 2297 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2298 engine->name, ret); 2299 list_del(&ring->link); 2300 kfree(ring); 2301 return ERR_PTR(ret); 2302 } 2303 2304 return ring; 2305 } 2306 2307 void 2308 intel_ringbuffer_free(struct intel_ringbuffer *ring) 2309 { 2310 intel_destroy_ringbuffer_obj(ring); 2311 list_del(&ring->link); 2312 kfree(ring); 2313 } 2314 2315 static int intel_init_ring_buffer(struct drm_device *dev, 2316 struct intel_engine_cs *engine) 2317 { 2318 struct intel_ringbuffer *ringbuf; 2319 int ret; 2320 2321 WARN_ON(engine->buffer); 2322 2323 engine->dev = dev; 2324 INIT_LIST_HEAD(&engine->active_list); 2325 INIT_LIST_HEAD(&engine->request_list); 2326 INIT_LIST_HEAD(&engine->execlist_queue); 2327 INIT_LIST_HEAD(&engine->buffers); 2328 i915_gem_batch_pool_init(dev, &engine->batch_pool); 2329 memset(engine->semaphore.sync_seqno, 0, 2330 sizeof(engine->semaphore.sync_seqno)); 2331 2332 init_waitqueue_head(&engine->irq_queue); 2333 2334 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2335 if (IS_ERR(ringbuf)) { 2336 ret = PTR_ERR(ringbuf); 2337 goto error; 2338 } 2339 engine->buffer = ringbuf; 2340 2341 if (I915_NEED_GFX_HWS(dev)) { 2342 ret = init_status_page(engine); 2343 if (ret) 2344 goto error; 2345 } else { 2346 WARN_ON(engine->id != RCS); 2347 ret = init_phys_status_page(engine); 2348 if (ret) 2349 goto error; 2350 } 2351 2352 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2353 if (ret) { 2354 DRM_ERROR("Failed to pin 
and map ringbuffer %s: %d\n", 2355 engine->name, ret); 2356 intel_destroy_ringbuffer_obj(ringbuf); 2357 goto error; 2358 } 2359 2360 ret = i915_cmd_parser_init_ring(engine); 2361 if (ret) 2362 goto error; 2363 2364 return 0; 2365 2366 error: 2367 intel_cleanup_engine(engine); 2368 return ret; 2369 } 2370 2371 void intel_cleanup_engine(struct intel_engine_cs *engine) 2372 { 2373 struct drm_i915_private *dev_priv; 2374 2375 if (!intel_engine_initialized(engine)) 2376 return; 2377 2378 dev_priv = to_i915(engine->dev); 2379 2380 if (engine->buffer) { 2381 intel_stop_engine(engine); 2382 WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2383 2384 intel_unpin_ringbuffer_obj(engine->buffer); 2385 intel_ringbuffer_free(engine->buffer); 2386 engine->buffer = NULL; 2387 } 2388 2389 if (engine->cleanup) 2390 engine->cleanup(engine); 2391 2392 if (I915_NEED_GFX_HWS(engine->dev)) { 2393 cleanup_status_page(engine); 2394 } else { 2395 WARN_ON(engine->id != RCS); 2396 cleanup_phys_status_page(engine); 2397 } 2398 2399 i915_cmd_parser_fini_ring(engine); 2400 i915_gem_batch_pool_fini(&engine->batch_pool); 2401 engine->dev = NULL; 2402 } 2403 2404 int intel_engine_idle(struct intel_engine_cs *engine) 2405 { 2406 struct drm_i915_gem_request *req; 2407 2408 /* Wait upon the last request to be completed */ 2409 if (list_empty(&engine->request_list)) 2410 return 0; 2411 2412 req = list_entry(engine->request_list.prev, 2413 struct drm_i915_gem_request, 2414 list); 2415 2416 /* Make sure we do not trigger any retires */ 2417 return __i915_wait_request(req, 2418 req->i915->mm.interruptible, 2419 NULL, NULL); 2420 } 2421 2422 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2423 { 2424 request->ringbuf = request->engine->buffer; 2425 return 0; 2426 } 2427 2428 int intel_ring_reserve_space(struct drm_i915_gem_request *request) 2429 { 2430 /* 2431 * The first call merely notes the reserve request and is common for 2432 * all back ends. The subsequent localised _begin() call actually 2433 * ensures that the reservation is available. Without the begin, if 2434 * the request creator immediately submitted the request without 2435 * adding any commands to it then there might not actually be 2436 * sufficient room for the submission commands. 
2437 */ 2438 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 2439 2440 return intel_ring_begin(request, 0); 2441 } 2442 2443 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) 2444 { 2445 GEM_BUG_ON(ringbuf->reserved_size); 2446 ringbuf->reserved_size = size; 2447 } 2448 2449 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) 2450 { 2451 GEM_BUG_ON(!ringbuf->reserved_size); 2452 ringbuf->reserved_size = 0; 2453 } 2454 2455 void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2456 { 2457 GEM_BUG_ON(!ringbuf->reserved_size); 2458 ringbuf->reserved_size = 0; 2459 } 2460 2461 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2462 { 2463 GEM_BUG_ON(ringbuf->reserved_size); 2464 } 2465 2466 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2467 { 2468 struct intel_ringbuffer *ringbuf = req->ringbuf; 2469 struct intel_engine_cs *engine = req->engine; 2470 struct drm_i915_gem_request *target; 2471 2472 intel_ring_update_space(ringbuf); 2473 if (ringbuf->space >= bytes) 2474 return 0; 2475 2476 /* 2477 * Space is reserved in the ringbuffer for finalising the request, 2478 * as that cannot be allowed to fail. During request finalisation, 2479 * reserved_space is set to 0 to stop the overallocation and the 2480 * assumption is that then we never need to wait (which has the 2481 * risk of failing with EINTR). 2482 * 2483 * See also i915_gem_request_alloc() and i915_add_request(). 2484 */ 2485 GEM_BUG_ON(!ringbuf->reserved_size); 2486 2487 list_for_each_entry(target, &engine->request_list, list) { 2488 unsigned space; 2489 2490 /* 2491 * The request queue is per-engine, so can contain requests 2492 * from multiple ringbuffers. Here, we must ignore any that 2493 * aren't from the ringbuffer we're considering. 2494 */ 2495 if (target->ringbuf != ringbuf) 2496 continue; 2497 2498 /* Would completion of this request free enough space? */ 2499 space = __intel_ring_space(target->postfix, ringbuf->tail, 2500 ringbuf->size); 2501 if (space >= bytes) 2502 break; 2503 } 2504 2505 if (WARN_ON(&target->list == &engine->request_list)) 2506 return -ENOSPC; 2507 2508 return i915_wait_request(target); 2509 } 2510 2511 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2512 { 2513 struct intel_ringbuffer *ringbuf = req->ringbuf; 2514 int remain_actual = ringbuf->size - ringbuf->tail; 2515 int remain_usable = ringbuf->effective_size - ringbuf->tail; 2516 int bytes = num_dwords * sizeof(u32); 2517 int total_bytes, wait_bytes; 2518 bool need_wrap = false; 2519 2520 total_bytes = bytes + ringbuf->reserved_size; 2521 2522 if (unlikely(bytes > remain_usable)) { 2523 /* 2524 * Not enough space for the basic request. So need to flush 2525 * out the remainder and then wait for base + reserved. 2526 */ 2527 wait_bytes = remain_actual + total_bytes; 2528 need_wrap = true; 2529 } else if (unlikely(total_bytes > remain_usable)) { 2530 /* 2531 * The base request will fit but the reserved space 2532 * falls off the end. So we don't need an immediate wrap 2533 * and only need to effectively wait for the reserved 2534 * size space from the start of ringbuffer. 2535 */ 2536 wait_bytes = remain_actual + ringbuf->reserved_size; 2537 } else { 2538 /* No wrapping required, just waiting. 
*/ 2539 wait_bytes = total_bytes; 2540 } 2541 2542 if (wait_bytes > ringbuf->space) { 2543 int ret = wait_for_space(req, wait_bytes); 2544 if (unlikely(ret)) 2545 return ret; 2546 2547 intel_ring_update_space(ringbuf); 2548 if (unlikely(ringbuf->space < wait_bytes)) 2549 return -EAGAIN; 2550 } 2551 2552 if (unlikely(need_wrap)) { 2553 GEM_BUG_ON(remain_actual > ringbuf->space); 2554 GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); 2555 2556 /* Fill the tail with MI_NOOP */ 2557 memset(ringbuf->virtual_start + ringbuf->tail, 2558 0, remain_actual); 2559 ringbuf->tail = 0; 2560 ringbuf->space -= remain_actual; 2561 } 2562 2563 ringbuf->space -= bytes; 2564 GEM_BUG_ON(ringbuf->space < 0); 2565 return 0; 2566 } 2567 2568 /* Align the ring tail to a cacheline boundary */ 2569 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2570 { 2571 struct intel_engine_cs *engine = req->engine; 2572 int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2573 int ret; 2574 2575 if (num_dwords == 0) 2576 return 0; 2577 2578 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 2579 ret = intel_ring_begin(req, num_dwords); 2580 if (ret) 2581 return ret; 2582 2583 while (num_dwords--) 2584 intel_ring_emit(engine, MI_NOOP); 2585 2586 intel_ring_advance(engine); 2587 2588 return 0; 2589 } 2590 2591 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2592 { 2593 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2594 2595 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2596 * so long as the semaphore value in the register/page is greater 2597 * than the sync value), so whenever we reset the seqno, 2598 * so long as we reset the tracking semaphore value to 0, it will 2599 * always be before the next request's seqno. If we don't reset 2600 * the semaphore value, then when the seqno moves backwards all 2601 * future waits will complete instantly (causing rendering corruption). 2602 */ 2603 if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { 2604 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2605 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2606 if (HAS_VEBOX(dev_priv)) 2607 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); 2608 } 2609 if (dev_priv->semaphore_obj) { 2610 struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; 2611 struct vm_page *page = i915_gem_object_get_dirty_page(obj, 0); 2612 char *semaphores = kmap(page); 2613 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 2614 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); 2615 kunmap(page); 2616 } 2617 memset(engine->semaphore.sync_seqno, 0, 2618 sizeof(engine->semaphore.sync_seqno)); 2619 2620 engine->set_seqno(engine, seqno); 2621 engine->last_submitted_seqno = seqno; 2622 2623 engine->hangcheck.seqno = seqno; 2624 } 2625 2626 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2627 u32 value) 2628 { 2629 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2630 2631 /* Every tail move must follow the sequence below */ 2632 2633 /* Disable notification that the ring is IDLE. The GT 2634 * will then assume that it is busy and bring it out of rc6. 2635 */ 2636 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2637 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2638 2639 /* Clear the context id. Here be magic! */ 2640 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 2641 2642 /* Wait for the ring not to be idle, i.e. for it to wake up. 
*/ 2643 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 2644 GEN6_BSD_SLEEP_INDICATOR) == 0, 2645 50)) 2646 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2647 2648 /* Now that the ring is fully powered up, update the tail */ 2649 I915_WRITE_TAIL(engine, value); 2650 POSTING_READ(RING_TAIL(engine->mmio_base)); 2651 2652 /* Let the ring send IDLE messages to the GT again, 2653 * and so let it sleep to conserve power when idle. 2654 */ 2655 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2656 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2657 } 2658 2659 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2660 u32 invalidate, u32 flush) 2661 { 2662 struct intel_engine_cs *engine = req->engine; 2663 uint32_t cmd; 2664 int ret; 2665 2666 ret = intel_ring_begin(req, 4); 2667 if (ret) 2668 return ret; 2669 2670 cmd = MI_FLUSH_DW; 2671 if (INTEL_INFO(engine->dev)->gen >= 8) 2672 cmd += 1; 2673 2674 /* We always require a command barrier so that subsequent 2675 * commands, such as breadcrumb interrupts, are strictly ordered 2676 * wrt the contents of the write cache being flushed to memory 2677 * (and thus being coherent from the CPU). 2678 */ 2679 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2680 2681 /* 2682 * Bspec vol 1c.5 - video engine command streamer: 2683 * "If ENABLED, all TLBs will be invalidated once the flush 2684 * operation is complete. This bit is only valid when the 2685 * Post-Sync Operation field is a value of 1h or 3h." 2686 */ 2687 if (invalidate & I915_GEM_GPU_DOMAINS) 2688 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 2689 2690 intel_ring_emit(engine, cmd); 2691 intel_ring_emit(engine, 2692 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2693 if (INTEL_INFO(engine->dev)->gen >= 8) { 2694 intel_ring_emit(engine, 0); /* upper addr */ 2695 intel_ring_emit(engine, 0); /* value */ 2696 } else { 2697 intel_ring_emit(engine, 0); 2698 intel_ring_emit(engine, MI_NOOP); 2699 } 2700 intel_ring_advance(engine); 2701 return 0; 2702 } 2703 2704 static int 2705 gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2706 u64 offset, u32 len, 2707 unsigned dispatch_flags) 2708 { 2709 struct intel_engine_cs *engine = req->engine; 2710 bool ppgtt = USES_PPGTT(engine->dev) && 2711 !(dispatch_flags & I915_DISPATCH_SECURE); 2712 int ret; 2713 2714 ret = intel_ring_begin(req, 4); 2715 if (ret) 2716 return ret; 2717 2718 /* FIXME(BDW): Address space and security selectors. */ 2719 intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | 2720 (dispatch_flags & I915_DISPATCH_RS ? 2721 MI_BATCH_RESOURCE_STREAMER : 0)); 2722 intel_ring_emit(engine, lower_32_bits(offset)); 2723 intel_ring_emit(engine, upper_32_bits(offset)); 2724 intel_ring_emit(engine, MI_NOOP); 2725 intel_ring_advance(engine); 2726 2727 return 0; 2728 } 2729 2730 static int 2731 hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2732 u64 offset, u32 len, 2733 unsigned dispatch_flags) 2734 { 2735 struct intel_engine_cs *engine = req->engine; 2736 int ret; 2737 2738 ret = intel_ring_begin(req, 2); 2739 if (ret) 2740 return ret; 2741 2742 intel_ring_emit(engine, 2743 MI_BATCH_BUFFER_START | 2744 (dispatch_flags & I915_DISPATCH_SECURE ? 2745 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | 2746 (dispatch_flags & I915_DISPATCH_RS ? 
2747 MI_BATCH_RESOURCE_STREAMER : 0)); 2748 /* bit0-7 is the length on GEN6+ */ 2749 intel_ring_emit(engine, offset); 2750 intel_ring_advance(engine); 2751 2752 return 0; 2753 } 2754 2755 static int 2756 gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, 2757 u64 offset, u32 len, 2758 unsigned dispatch_flags) 2759 { 2760 struct intel_engine_cs *engine = req->engine; 2761 int ret; 2762 2763 ret = intel_ring_begin(req, 2); 2764 if (ret) 2765 return ret; 2766 2767 intel_ring_emit(engine, 2768 MI_BATCH_BUFFER_START | 2769 (dispatch_flags & I915_DISPATCH_SECURE ? 2770 0 : MI_BATCH_NON_SECURE_I965)); 2771 /* bit0-7 is the length on GEN6+ */ 2772 intel_ring_emit(engine, offset); 2773 intel_ring_advance(engine); 2774 2775 return 0; 2776 } 2777 2778 /* Blitter support (SandyBridge+) */ 2779 2780 static int gen6_ring_flush(struct drm_i915_gem_request *req, 2781 u32 invalidate, u32 flush) 2782 { 2783 struct intel_engine_cs *engine = req->engine; 2784 struct drm_device *dev = engine->dev; 2785 uint32_t cmd; 2786 int ret; 2787 2788 ret = intel_ring_begin(req, 4); 2789 if (ret) 2790 return ret; 2791 2792 cmd = MI_FLUSH_DW; 2793 if (INTEL_INFO(dev)->gen >= 8) 2794 cmd += 1; 2795 2796 /* We always require a command barrier so that subsequent 2797 * commands, such as breadcrumb interrupts, are strictly ordered 2798 * wrt the contents of the write cache being flushed to memory 2799 * (and thus being coherent from the CPU). 2800 */ 2801 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2802 2803 /* 2804 * Bspec vol 1c.3 - blitter engine command streamer: 2805 * "If ENABLED, all TLBs will be invalidated once the flush 2806 * operation is complete. This bit is only valid when the 2807 * Post-Sync Operation field is a value of 1h or 3h." 2808 */ 2809 if (invalidate & I915_GEM_DOMAIN_RENDER) 2810 cmd |= MI_INVALIDATE_TLB; 2811 intel_ring_emit(engine, cmd); 2812 intel_ring_emit(engine, 2813 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2814 if (INTEL_INFO(dev)->gen >= 8) { 2815 intel_ring_emit(engine, 0); /* upper addr */ 2816 intel_ring_emit(engine, 0); /* value */ 2817 } else { 2818 intel_ring_emit(engine, 0); 2819 intel_ring_emit(engine, MI_NOOP); 2820 } 2821 intel_ring_advance(engine); 2822 2823 return 0; 2824 } 2825 2826 int intel_init_render_ring_buffer(struct drm_device *dev) 2827 { 2828 struct drm_i915_private *dev_priv = dev->dev_private; 2829 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 2830 struct drm_i915_gem_object *obj; 2831 int ret; 2832 2833 engine->name = "render ring"; 2834 engine->id = RCS; 2835 engine->exec_id = I915_EXEC_RENDER; 2836 engine->hw_id = 0; 2837 engine->mmio_base = RENDER_RING_BASE; 2838 2839 if (INTEL_INFO(dev)->gen >= 8) { 2840 if (i915_semaphore_is_enabled(dev)) { 2841 obj = i915_gem_alloc_object(dev, 4096); 2842 if (obj == NULL) { 2843 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2844 i915.semaphores = 0; 2845 } else { 2846 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2847 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); 2848 if (ret != 0) { 2849 drm_gem_object_unreference(&obj->base); 2850 DRM_ERROR("Failed to pin semaphore bo. 
Disabling semaphores\n"); 2851 i915.semaphores = 0; 2852 } else 2853 dev_priv->semaphore_obj = obj; 2854 } 2855 } 2856 2857 engine->init_context = intel_rcs_ctx_init; 2858 engine->add_request = gen6_add_request; 2859 engine->flush = gen8_render_ring_flush; 2860 engine->irq_get = gen8_ring_get_irq; 2861 engine->irq_put = gen8_ring_put_irq; 2862 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2863 engine->irq_seqno_barrier = gen6_seqno_barrier; 2864 engine->get_seqno = ring_get_seqno; 2865 engine->set_seqno = ring_set_seqno; 2866 if (i915_semaphore_is_enabled(dev)) { 2867 WARN_ON(!dev_priv->semaphore_obj); 2868 engine->semaphore.sync_to = gen8_ring_sync; 2869 engine->semaphore.signal = gen8_rcs_signal; 2870 GEN8_RING_SEMAPHORE_INIT(engine); 2871 } 2872 } else if (INTEL_INFO(dev)->gen >= 6) { 2873 engine->init_context = intel_rcs_ctx_init; 2874 engine->add_request = gen6_add_request; 2875 engine->flush = gen7_render_ring_flush; 2876 if (INTEL_INFO(dev)->gen == 6) 2877 engine->flush = gen6_render_ring_flush; 2878 engine->irq_get = gen6_ring_get_irq; 2879 engine->irq_put = gen6_ring_put_irq; 2880 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2881 engine->irq_seqno_barrier = gen6_seqno_barrier; 2882 engine->get_seqno = ring_get_seqno; 2883 engine->set_seqno = ring_set_seqno; 2884 if (i915_semaphore_is_enabled(dev)) { 2885 engine->semaphore.sync_to = gen6_ring_sync; 2886 engine->semaphore.signal = gen6_signal; 2887 /* 2888 * The current semaphore is only applied on pre-gen8 2889 * platform. And there is no VCS2 ring on the pre-gen8 2890 * platform. So the semaphore between RCS and VCS2 is 2891 * initialized as INVALID. Gen8 will initialize the 2892 * sema between VCS2 and RCS later. 2893 */ 2894 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; 2895 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; 2896 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; 2897 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; 2898 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2899 engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; 2900 engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; 2901 engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; 2902 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2903 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2904 } 2905 } else if (IS_GEN5(dev)) { 2906 engine->add_request = pc_render_add_request; 2907 engine->flush = gen4_render_ring_flush; 2908 engine->get_seqno = pc_render_get_seqno; 2909 engine->set_seqno = pc_render_set_seqno; 2910 engine->irq_get = gen5_ring_get_irq; 2911 engine->irq_put = gen5_ring_put_irq; 2912 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | 2913 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 2914 } else { 2915 engine->add_request = i9xx_add_request; 2916 if (INTEL_INFO(dev)->gen < 4) 2917 engine->flush = gen2_render_ring_flush; 2918 else 2919 engine->flush = gen4_render_ring_flush; 2920 engine->get_seqno = ring_get_seqno; 2921 engine->set_seqno = ring_set_seqno; 2922 if (IS_GEN2(dev)) { 2923 engine->irq_get = i8xx_ring_get_irq; 2924 engine->irq_put = i8xx_ring_put_irq; 2925 } else { 2926 engine->irq_get = i9xx_ring_get_irq; 2927 engine->irq_put = i9xx_ring_put_irq; 2928 } 2929 engine->irq_enable_mask = I915_USER_INTERRUPT; 2930 } 2931 engine->write_tail = ring_write_tail; 2932 2933 if (IS_HASWELL(dev)) 2934 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2935 else if (IS_GEN8(dev)) 2936 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2937 else if (INTEL_INFO(dev)->gen >= 
6) 2938 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2939 else if (INTEL_INFO(dev)->gen >= 4) 2940 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 2941 else if (IS_I830(dev) || IS_845G(dev)) 2942 engine->dispatch_execbuffer = i830_dispatch_execbuffer; 2943 else 2944 engine->dispatch_execbuffer = i915_dispatch_execbuffer; 2945 engine->init_hw = init_render_ring; 2946 engine->cleanup = render_ring_cleanup; 2947 2948 /* Workaround batchbuffer to combat CS tlb bug. */ 2949 if (HAS_BROKEN_CS_TLB(dev)) { 2950 obj = i915_gem_alloc_object(dev, I830_WA_SIZE); 2951 if (obj == NULL) { 2952 DRM_ERROR("Failed to allocate batch bo\n"); 2953 return -ENOMEM; 2954 } 2955 2956 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 2957 if (ret != 0) { 2958 drm_gem_object_unreference(&obj->base); 2959 DRM_ERROR("Failed to pin batch bo\n"); 2960 return ret; 2961 } 2962 2963 engine->scratch.obj = obj; 2964 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 2965 } 2966 2967 ret = intel_init_ring_buffer(dev, engine); 2968 if (ret) 2969 return ret; 2970 2971 if (INTEL_INFO(dev)->gen >= 5) { 2972 ret = intel_init_pipe_control(engine); 2973 if (ret) 2974 return ret; 2975 } 2976 2977 return 0; 2978 } 2979 2980 int intel_init_bsd_ring_buffer(struct drm_device *dev) 2981 { 2982 struct drm_i915_private *dev_priv = dev->dev_private; 2983 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2984 2985 engine->name = "bsd ring"; 2986 engine->id = VCS; 2987 engine->exec_id = I915_EXEC_BSD; 2988 engine->hw_id = 1; 2989 2990 engine->write_tail = ring_write_tail; 2991 if (INTEL_INFO(dev)->gen >= 6) { 2992 engine->mmio_base = GEN6_BSD_RING_BASE; 2993 /* gen6 bsd needs a special wa for tail updates */ 2994 if (IS_GEN6(dev)) 2995 engine->write_tail = gen6_bsd_ring_write_tail; 2996 engine->flush = gen6_bsd_ring_flush; 2997 engine->add_request = gen6_add_request; 2998 engine->irq_seqno_barrier = gen6_seqno_barrier; 2999 engine->get_seqno = ring_get_seqno; 3000 engine->set_seqno = ring_set_seqno; 3001 if (INTEL_INFO(dev)->gen >= 8) { 3002 engine->irq_enable_mask = 3003 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 3004 engine->irq_get = gen8_ring_get_irq; 3005 engine->irq_put = gen8_ring_put_irq; 3006 engine->dispatch_execbuffer = 3007 gen8_ring_dispatch_execbuffer; 3008 if (i915_semaphore_is_enabled(dev)) { 3009 engine->semaphore.sync_to = gen8_ring_sync; 3010 engine->semaphore.signal = gen8_xcs_signal; 3011 GEN8_RING_SEMAPHORE_INIT(engine); 3012 } 3013 } else { 3014 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 3015 engine->irq_get = gen6_ring_get_irq; 3016 engine->irq_put = gen6_ring_put_irq; 3017 engine->dispatch_execbuffer = 3018 gen6_ring_dispatch_execbuffer; 3019 if (i915_semaphore_is_enabled(dev)) { 3020 engine->semaphore.sync_to = gen6_ring_sync; 3021 engine->semaphore.signal = gen6_signal; 3022 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; 3023 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; 3024 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; 3025 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; 3026 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 3027 engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; 3028 engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; 3029 engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; 3030 engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; 3031 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 3032 } 3033 } 3034 } else { 3035 engine->mmio_base = BSD_RING_BASE; 3036 engine->flush = bsd_ring_flush; 3037 
engine->add_request = i9xx_add_request; 3038 engine->get_seqno = ring_get_seqno; 3039 engine->set_seqno = ring_set_seqno; 3040 if (IS_GEN5(dev)) { 3041 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 3042 engine->irq_get = gen5_ring_get_irq; 3043 engine->irq_put = gen5_ring_put_irq; 3044 } else { 3045 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 3046 engine->irq_get = i9xx_ring_get_irq; 3047 engine->irq_put = i9xx_ring_put_irq; 3048 } 3049 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 3050 } 3051 engine->init_hw = init_ring_common; 3052 3053 return intel_init_ring_buffer(dev, engine); 3054 } 3055 3056 /** 3057 * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3) 3058 */ 3059 int intel_init_bsd2_ring_buffer(struct drm_device *dev) 3060 { 3061 struct drm_i915_private *dev_priv = dev->dev_private; 3062 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 3063 3064 engine->name = "bsd2 ring"; 3065 engine->id = VCS2; 3066 engine->exec_id = I915_EXEC_BSD; 3067 engine->hw_id = 4; 3068 3069 engine->write_tail = ring_write_tail; 3070 engine->mmio_base = GEN8_BSD2_RING_BASE; 3071 engine->flush = gen6_bsd_ring_flush; 3072 engine->add_request = gen6_add_request; 3073 engine->irq_seqno_barrier = gen6_seqno_barrier; 3074 engine->get_seqno = ring_get_seqno; 3075 engine->set_seqno = ring_set_seqno; 3076 engine->irq_enable_mask = 3077 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 3078 engine->irq_get = gen8_ring_get_irq; 3079 engine->irq_put = gen8_ring_put_irq; 3080 engine->dispatch_execbuffer = 3081 gen8_ring_dispatch_execbuffer; 3082 if (i915_semaphore_is_enabled(dev)) { 3083 engine->semaphore.sync_to = gen8_ring_sync; 3084 engine->semaphore.signal = gen8_xcs_signal; 3085 GEN8_RING_SEMAPHORE_INIT(engine); 3086 } 3087 engine->init_hw = init_ring_common; 3088 3089 return intel_init_ring_buffer(dev, engine); 3090 } 3091 3092 int intel_init_blt_ring_buffer(struct drm_device *dev) 3093 { 3094 struct drm_i915_private *dev_priv = dev->dev_private; 3095 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 3096 3097 engine->name = "blitter ring"; 3098 engine->id = BCS; 3099 engine->exec_id = I915_EXEC_BLT; 3100 engine->hw_id = 2; 3101 3102 engine->mmio_base = BLT_RING_BASE; 3103 engine->write_tail = ring_write_tail; 3104 engine->flush = gen6_ring_flush; 3105 engine->add_request = gen6_add_request; 3106 engine->irq_seqno_barrier = gen6_seqno_barrier; 3107 engine->get_seqno = ring_get_seqno; 3108 engine->set_seqno = ring_set_seqno; 3109 if (INTEL_INFO(dev)->gen >= 8) { 3110 engine->irq_enable_mask = 3111 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 3112 engine->irq_get = gen8_ring_get_irq; 3113 engine->irq_put = gen8_ring_put_irq; 3114 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3115 if (i915_semaphore_is_enabled(dev)) { 3116 engine->semaphore.sync_to = gen8_ring_sync; 3117 engine->semaphore.signal = gen8_xcs_signal; 3118 GEN8_RING_SEMAPHORE_INIT(engine); 3119 } 3120 } else { 3121 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 3122 engine->irq_get = gen6_ring_get_irq; 3123 engine->irq_put = gen6_ring_put_irq; 3124 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3125 if (i915_semaphore_is_enabled(dev)) { 3126 engine->semaphore.signal = gen6_signal; 3127 engine->semaphore.sync_to = gen6_ring_sync; 3128 /* 3129 * The current semaphore is only applied on pre-gen8 3130 * platform. And there is no VCS2 ring on the pre-gen8 3131 * platform. So the semaphore between BCS and VCS2 is 3132 * initialized as INVALID. 
Gen8 will initialize the 3133 * sema between BCS and VCS2 later. 3134 */ 3135 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; 3136 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; 3137 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; 3138 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; 3139 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 3140 engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; 3141 engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; 3142 engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; 3143 engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; 3144 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 3145 } 3146 } 3147 engine->init_hw = init_ring_common; 3148 3149 return intel_init_ring_buffer(dev, engine); 3150 } 3151 3152 int intel_init_vebox_ring_buffer(struct drm_device *dev) 3153 { 3154 struct drm_i915_private *dev_priv = dev->dev_private; 3155 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 3156 3157 engine->name = "video enhancement ring"; 3158 engine->id = VECS; 3159 engine->exec_id = I915_EXEC_VEBOX; 3160 engine->hw_id = 3; 3161 3162 engine->mmio_base = VEBOX_RING_BASE; 3163 engine->write_tail = ring_write_tail; 3164 engine->flush = gen6_ring_flush; 3165 engine->add_request = gen6_add_request; 3166 engine->irq_seqno_barrier = gen6_seqno_barrier; 3167 engine->get_seqno = ring_get_seqno; 3168 engine->set_seqno = ring_set_seqno; 3169 3170 if (INTEL_INFO(dev)->gen >= 8) { 3171 engine->irq_enable_mask = 3172 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3173 engine->irq_get = gen8_ring_get_irq; 3174 engine->irq_put = gen8_ring_put_irq; 3175 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3176 if (i915_semaphore_is_enabled(dev)) { 3177 engine->semaphore.sync_to = gen8_ring_sync; 3178 engine->semaphore.signal = gen8_xcs_signal; 3179 GEN8_RING_SEMAPHORE_INIT(engine); 3180 } 3181 } else { 3182 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 3183 engine->irq_get = hsw_vebox_get_irq; 3184 engine->irq_put = hsw_vebox_put_irq; 3185 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3186 if (i915_semaphore_is_enabled(dev)) { 3187 engine->semaphore.sync_to = gen6_ring_sync; 3188 engine->semaphore.signal = gen6_signal; 3189 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; 3190 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; 3191 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; 3192 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; 3193 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 3194 engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; 3195 engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; 3196 engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; 3197 engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; 3198 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 3199 } 3200 } 3201 engine->init_hw = init_ring_common; 3202 3203 return intel_init_ring_buffer(dev, engine); 3204 } 3205 3206 int 3207 intel_ring_flush_all_caches(struct drm_i915_gem_request *req) 3208 { 3209 struct intel_engine_cs *engine = req->engine; 3210 int ret; 3211 3212 if (!engine->gpu_caches_dirty) 3213 return 0; 3214 3215 ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS); 3216 if (ret) 3217 return ret; 3218 3219 trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); 3220 3221 engine->gpu_caches_dirty = false; 3222 return 0; 3223 } 3224 3225 int 3226 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 3227 { 3228 struct intel_engine_cs *engine = req->engine; 
3229 uint32_t flush_domains; 3230 int ret; 3231 3232 flush_domains = 0; 3233 if (engine->gpu_caches_dirty) 3234 flush_domains = I915_GEM_GPU_DOMAINS; 3235 3236 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3237 if (ret) 3238 return ret; 3239 3240 trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 3241 3242 engine->gpu_caches_dirty = false; 3243 return 0; 3244 } 3245 3246 void 3247 intel_stop_engine(struct intel_engine_cs *engine) 3248 { 3249 int ret; 3250 3251 if (!intel_engine_initialized(engine)) 3252 return; 3253 3254 ret = intel_engine_idle(engine); 3255 if (ret) 3256 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 3257 engine->name, ret); 3258 3259 stop_ring(engine); 3260 } 3261
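
/*
 * Illustration only, not driver code: a minimal sketch of the free-space
 * rule that wait_for_space() and intel_ring_begin() above rely on. The
 * helper name and the numbers below are made up for the example; the real
 * reserve is I915_RING_FREE_SPACE.
 *
 * E.g. with a 128 KiB ring (size = 0x20000), head = 0x100 and tail = 0x800,
 * head - tail = -0x700, which is <= 0, so the ring has wrapped and the
 * usable space is -0x700 + 0x20000 = 0x1f900 minus the reserve. Keeping the
 * reserve means head and tail can never become equal while commands are
 * still queued, so "empty" and "full" stay distinguishable.
 */
#if 0	/* example only, never built */
static int example_ring_space(int head, int tail, int size, int reserve)
{
	int space = head - tail;	/* bytes from tail up to head */

	if (space <= 0)			/* tail is ahead of head: wrap around */
		space += size;

	return space - reserve;		/* leave a gap between tail and head */
}
#endif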