/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev)
		return false;

	if (i915.enable_execlists) {
		struct intel_context *dctx = ring->default_context;
		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;

		return ringbuf->obj;
	} else
		return ring->buffer && ring->buffer->obj;
}

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
	return __intel_ring_space(ringbuf->head & HEAD_ADDR,
				  ringbuf->tail, ringbuf->size);
}
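
/*
 * Worked example of the free-space computation above (values are
 * illustrative only): with a 4096-byte ring, head == 512 and tail == 1024,
 * __intel_ring_space() yields 512 - (1024 + I915_RING_FREE_SPACE), which is
 * negative, so the ring size is added back and the usable space is
 * 3584 - I915_RING_FREE_SPACE bytes.  The I915_RING_FREE_SPACE slack keeps
 * tail from ever catching up with head exactly, which the hardware would
 * otherwise interpret as an empty ring.
 */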

bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}
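
/*
 * Note: the gen8 helper below emits the 6-dword PIPE_CONTROL format used
 * from Broadwell onwards (the longer form carries a full 48-bit address
 * plus a qword of post-sync data), whereas the main gen6/gen7 flush paths
 * above emit the 4-dword form.
 */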

static int
gen8_emit_pipe_control(struct intel_engine_cs *ring,
		       u32 flags, u32 scratch_addr)
{
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(ring,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
	if (ret)
		return ret;

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty.  So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
				return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}
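
/*
 * Bring-up sequence for a legacy ring, as implemented below: stop the ring,
 * point the hardware status page at the right address, program the ring
 * start address, clear HEAD, then write RING_CTL with the buffer length and
 * the VALID bit and verify that the ring actually started.  The comments in
 * the function call out the workarounds this ordering depends on.
 */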

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(ring))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  ring->name, I915_READ_HEAD(ring));
	I915_WRITE_HEAD(ring, 0);
	(void)I915_READ_HEAD(ring);

	I915_WRITE_CTL(ring,
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ringbuf->space = intel_ring_space(ringbuf);
		ringbuf->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(ring->scratch.obj->pages[0]);
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(ring->scratch.obj->pages[0]);
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
				      u32 addr, u32 value)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
		return;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, addr);
	intel_ring_emit(ring, value);

	dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
	dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
	/* value is updated with the status of remaining bits of this
	 * register when it is read from debugfs file
	 */
	dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
	dev_priv->num_wa_regs++;

	return;
}
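
/*
 * The gen8 init_workarounds functions below apply their workarounds with
 * intel_ring_emit_wa(), so every register write is both emitted to the ring
 * as an MI_LOAD_REGISTER_IMM and recorded in dev_priv->intel_wa_regs, where
 * it can later be cross-checked through debugfs.
 */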

static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * workarounds applied in this fn are part of register state context,
	 * they need to be re-initialized following a gpu reset, suspend/resume
	 * or module reload.
	 */
	dev_priv->num_wa_regs = 0;
	memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));

	/*
	 * update the number of dwords required based on the
	 * actual number of workarounds applied
	 */
	ret = intel_ring_begin(ring, 18);
	if (ret)
		return ret;

	/* WaDisablePartialInstShootdown:bdw */
	/* WaDisableThreadStallDopClockGating:bdw */
	/* FIXME: Unclear whether we really need this on production bdw. */
	intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
			   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
					      | STALL_DOP_GATING_DISABLE));

	/* WaDisableDopClockGating:bdw May not be needed for production */
	intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
			   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	intel_ring_emit_wa(ring, HDC_CHICKEN0,
			   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));

	/* Wa4x4STCOptimizationDisable:bdw */
	intel_ring_emit_wa(ring, CACHE_MODE_1,
			   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	intel_ring_emit_wa(ring, GEN7_GT_MODE,
			   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
			 dev_priv->num_wa_regs);

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * workarounds applied in this fn are part of register state context,
	 * they need to be re-initialized following a gpu reset, suspend/resume
	 * or module reload.
	 */
	dev_priv->num_wa_regs = 0;
	memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));

	ret = intel_ring_begin(ring, 12);
	if (ret)
		return ret;

	/* WaDisablePartialInstShootdown:chv */
	intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
			   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
			   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
			   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	intel_ring_advance(ring);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = intel_init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return ret;
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(ring);
}

static int gen8_rcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}
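
/*
 * Each mailbox update emitted by gen6_signal() below is three dwords (an
 * MI_LOAD_REGISTER_IMM header, the mailbox register, and the seqno).  The
 * dword count is rounded up to an even number so the ring tail stays qword
 * aligned, and the trailing MI_NOOP consumes the padding dword whenever the
 * per-ring total is odd.  For example, with four rings enabled the three
 * other mailboxes need 9 dwords, which is rounded up to 10, and the MI_NOOP
 * fills the tenth slot.
 */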

static int gen6_signal(struct intel_engine_cs *signaller,
		       unsigned int num_dwords)
{
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(ring, 4);
	else
		ret = intel_ring_begin(ring, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
			PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
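
/*
 * intel_ring_setup_status_page() picks the per-ring HWS_PGA register (the
 * register layout moved around between generations), writes the GGTT
 * address of the status page into it, and on gen6/7 issues the INSTPM sync
 * flush needed to invalidate the TLB entry for the old page.
 */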

void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
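
/*
 * The 830/845 dispatch path below works around an invalid-PTE TLB bug: the
 * TLB entries for the scratch area are first evicted with a blit, the batch
 * is then copied with the blitter into that pinned scratch area (cs_offset),
 * and execution runs from the stable copy.  Batches larger than
 * I830_BATCH_LIMIT cannot take this path and are rejected unless dispatched
 * with I915_DISPATCH_PINNED.
 */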

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	u32 cs_offset = ring->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	if ((obj = ring->status_page.obj) == NULL) {
		unsigned flags;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate status page\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		flags = 0;
		if (!HAS_LLC(ring->dev))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
			 * gen4, gen5, or byt, they also behave similarly
			 * and hang if the HWS is placed at the top of the
			 * GTT. To generalise, it appears that all !llc
			 * platforms have issues with us placing the HWS
			 * above the mappable region (even though we never
			 * actually map it).
			 */
			flags |= PIN_MAPPABLE;
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->status_page.obj = obj;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(obj->pages[0]);
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	if (!ringbuf->obj)
		return;

	iounmap(ringbuf->virtual_start, ringbuf->size);
	i915_gem_object_ggtt_unpin(ringbuf->obj);
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}

int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int ret;

	if (ringbuf->obj)
		return 0;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ringbuf->size);
	if (obj == NULL)
		return -ENOMEM;

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ringbuf->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
		ret = -EINVAL;
		goto err_unpin;
	}

	ringbuf->obj = obj;
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->execlist_queue);
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->ring = ring;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			goto error;
	}

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
		goto error;
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(dev) || IS_845G(dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;

	ret = ring->init(ring);
	if (ret)
		goto error;

	return 0;

error:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;

	if (!intel_ring_initialized(ring))
		return;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	intel_destroy_ringbuffer_obj(ringbuf);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);

	i915_cmd_parser_fini_ring(ring);

	kfree(ringbuf);
	ring->buffer = NULL;
}

static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}
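
/*
 * ring_wait_for_space() below first tries to free space by waiting for and
 * retiring already-submitted requests (intel_ring_wait_request); only if
 * that cannot satisfy the request does it fall back to polling the hardware
 * HEAD pointer, bailing out if the GPU is declared wedged or the 60 second
 * timeout expires.
 */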

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	trace_i915_ring_wait_begin(ring);
	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= n) {
			ret = 0;
			break;
		}

#if 0
		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}
#else
		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

		msleep(1);

#if 0
		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);
	trace_i915_ring_wait_end(ring);
	return ret;
}

static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
	uint32_t __iomem *virt;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail);
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

int intel_ring_idle(struct intel_engine_cs *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}
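
/*
 * intel_ring_alloc_seqno() preallocates the lazy request structure and
 * reserves a seqno before any commands touch the ring, so that completing
 * the request later cannot fail on allocation halfway through an emit.
 */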
static int __intel_ring_prepare(struct intel_engine_cs *ring,
				int bytes)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int intel_ring_begin(struct intel_engine_cs *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->buffer->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(ring, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			     u64 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
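
/*
 * Informational summary of the MI_BATCH_BUFFER_START emitters above (the
 * Bspec remains the authoritative reference for the field layout):
 *
 *  - gen8_ring_dispatch_execbuffer: 64-bit batch address emitted as
 *    lower/upper dwords; bit 8 of the command selects the PPGTT address
 *    space for non-secure dispatch.
 *  - hsw_ring_dispatch_execbuffer: 32-bit address; non-secure batches set
 *    MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW.
 *  - gen6_ring_dispatch_execbuffer: 32-bit address; non-secure batches set
 *    only MI_BATCH_NON_SECURE_I965.
 */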

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_engine_cs *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct drm_i915_gem_object *obj;
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 8) {
		if (i915_semaphore_is_enabled(dev)) {
			obj = i915_gem_alloc_object(dev, 4096);
			if (obj == NULL) {
				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
				if (ret != 0) {
					drm_gem_object_unreference(&obj->base);
					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
					i915.semaphores = 0;
				} else
					dev_priv->semaphore_obj = obj;
			}
		}
		if (IS_CHERRYVIEW(dev))
			ring->init_context = chv_init_workarounds;
		else
			ring->init_context = bdw_init_workarounds;
		ring->add_request = gen6_add_request;
		ring->flush = gen8_render_ring_flush;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			WARN_ON(!dev_priv->semaphore_obj);
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_rcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform. And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between RCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * sema between VCS2 and RCS later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;

	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		ret = -ENODEV;
		goto err_ringbuf;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ringbuf->size = size;
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ringbuf->virtual_start = ioremap_wc(start, size);
	if (ringbuf->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		ret = -ENOMEM;
		goto err_ringbuf;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			goto err_vstart;
	}

	return 0;

err_vstart:
	pmap_unmapdev((vm_offset_t)ring->buffer->virtual_start, size);
err_ringbuf:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen8_ring_sync;
				ring->semaphore.signal = gen8_xcs_signal;
				GEN8_RING_SEMAPHORE_INIT;
			}
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen6_ring_sync;
				ring->semaphore.signal = gen6_signal;
				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
			}
		}
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

/**
 * Initialize the second BSD ring, which only exists on Broadwell GT3.
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	if (INTEL_INFO(dev)->gen != 8) {
		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
		return -EINVAL;
	}

	ring->name = "bsd2 ring";
	ring->id = VCS2;

	ring->write_tail = ring_write_tail;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->flush = gen6_bsd_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_get = gen8_ring_get_irq;
	ring->irq_put = gen8_ring_put_irq;
	ring->dispatch_execbuffer =
		gen8_ring_dispatch_execbuffer;
	if (i915_semaphore_is_enabled(dev)) {
		ring->semaphore.sync_to = gen8_ring_sync;
		ring->semaphore.signal = gen8_xcs_signal;
		GEN8_RING_SEMAPHORE_INIT;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.sync_to = gen6_ring_sync;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform. And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between BCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * sema between BCS and VCS2 later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}
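
/*
 * Informational note: intel_ring_invalidate_all_caches() is the read-side
 * helper (invalidate the GPU domains before new buffers are consumed,
 * flushing any still-dirty write caches in the same ring->flush() call),
 * while intel_ring_flush_all_caches() is the write-side helper that only
 * does work when gpu_caches_dirty is set.  A sketch of a hypothetical
 * caller, for illustration only:
 *
 *	ret = intel_ring_invalidate_all_caches(ring);
 *	if (ret)
 *		return ret;
 *	... emit commands that write through the GPU caches ...
 *	ring->gpu_caches_dirty = true;
 *	ret = intel_ring_flush_all_caches(ring);
 */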

void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	stop_ring(ring);
}