/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
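/*
 * Free space in the legacy ringbuffer is measured from the software TAIL
 * (where the CPU will emit the next command) forward to the hardware HEAD
 * (where the GPU is reading), wrapping modulo the buffer size.
 * I915_RING_FREE_SPACE bytes are subtracted as a reserve so that the ring
 * is never reported completely full.
 */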
static inline int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}

static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
262 */ 263 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 264 } 265 266 ret = intel_ring_begin(ring, 4); 267 if (ret) 268 return ret; 269 270 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 271 intel_ring_emit(ring, flags); 272 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 273 intel_ring_emit(ring, 0); 274 intel_ring_advance(ring); 275 276 return 0; 277 } 278 279 static int 280 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) 281 { 282 int ret; 283 284 ret = intel_ring_begin(ring, 4); 285 if (ret) 286 return ret; 287 288 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 289 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 290 PIPE_CONTROL_STALL_AT_SCOREBOARD); 291 intel_ring_emit(ring, 0); 292 intel_ring_emit(ring, 0); 293 intel_ring_advance(ring); 294 295 return 0; 296 } 297 298 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value) 299 { 300 int ret; 301 302 if (!ring->fbc_dirty) 303 return 0; 304 305 ret = intel_ring_begin(ring, 6); 306 if (ret) 307 return ret; 308 /* WaFbcNukeOn3DBlt:ivb/hsw */ 309 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 310 intel_ring_emit(ring, MSG_FBC_REND_STATE); 311 intel_ring_emit(ring, value); 312 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); 313 intel_ring_emit(ring, MSG_FBC_REND_STATE); 314 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 315 intel_ring_advance(ring); 316 317 ring->fbc_dirty = false; 318 return 0; 319 } 320 321 static int 322 gen7_render_ring_flush(struct intel_engine_cs *ring, 323 u32 invalidate_domains, u32 flush_domains) 324 { 325 u32 flags = 0; 326 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 327 int ret; 328 329 /* 330 * Ensure that any following seqno writes only happen when the render 331 * cache is indeed flushed. 332 * 333 * Workaround: 4th PIPE_CONTROL command (except the ones with only 334 * read-cache invalidate bits set) must have the CS_STALL bit set. We 335 * don't try to be clever and just set it unconditionally. 336 */ 337 flags |= PIPE_CONTROL_CS_STALL; 338 339 /* Just flush everything. Experiments have shown that reducing the 340 * number of bits based on the write domains has little performance 341 * impact. 342 */ 343 if (flush_domains) { 344 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 345 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 346 } 347 if (invalidate_domains) { 348 flags |= PIPE_CONTROL_TLB_INVALIDATE; 349 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 350 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 351 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 352 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 353 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 354 /* 355 * TLB invalidate requires a post-sync write. 356 */ 357 flags |= PIPE_CONTROL_QW_WRITE; 358 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 359 360 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; 361 362 /* Workaround: we must issue a pipe_control with CS-stall bit 363 * set before a pipe_control command that has the state cache 364 * invalidate bit set. 
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static int
gen8_emit_pipe_control(struct intel_engine_cs *ring,
		       u32 flags, u32 scratch_addr)
{
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(ring,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(ring, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
			return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

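/*
 * Bring a stopped ring back to a known state: (re)select the status page,
 * program the ring base address and control register, and verify that the
 * hardware actually latched the new values (HEAD back at zero, START
 * matching the ring object) before handing the ring back to the driver.
 */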
static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ringbuf->space = ring_space(ringbuf);
		ringbuf->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static int
init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(ring->scratch.obj->pages[0]);
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return ret;
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(ring->scratch.obj->pages[0]);
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

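/*
 * Semaphore signalling: after a request completes, the signalling ring
 * writes its new seqno into a per-ring mailbox (gen6/7: MMIO mailbox
 * registers written via MI_LOAD_REGISTER_IMM; gen8: per-ring slots in the
 * semaphore page written via PIPE_CONTROL or MI_FLUSH_DW), so that other
 * rings can later wait on it without CPU involvement.
 */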
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct intel_engine_cs *signaller,
		       unsigned int num_dwords)
{
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(ring, 4);
	else
		ret = intel_ring_begin(ring, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
			PIPE_CONTROL_DEPTH_STALL);			\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

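/*
 * The hardware status page (HWS) is a page the GPU writes seqnos and other
 * status into; the HWS_PGA-style register tells each engine where that page
 * lives, and the register's location moved between generations, which is
 * what the per-generation switch below handles.
 */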
void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	u32 cs_offset = ring->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	if ((obj = ring->status_page.obj) == NULL) {
		unsigned flags;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate status page\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		flags = 0;
		if (!HAS_LLC(ring->dev))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
			 * gen4, gen5, or byt, they also behave similarly
			 * and hang if the HWS is placed at the top of the
			 * GTT. To generalise, it appears that all !llc
			 * platforms have issues with us placing the HWS
			 * above the mappable region (even though we never
			 * actually map it).
			 */
			flags |= PIN_MAPPABLE;
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->status_page.obj = obj;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(obj->pages[0]);
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	if (!ringbuf->obj)
		return;

	iounmap(ringbuf->virtual_start, ringbuf->size);
	i915_gem_object_ggtt_unpin(ringbuf->obj);
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}

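/*
 * Ring backing storage is pinned into the mappable GTT and written by the
 * CPU through a write-combining mapping of the aperture (ioremap_wc of
 * gtt.mappable_base plus the object's GGTT offset); stolen memory is
 * preferred for the backing object on !LLC platforms.
 */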
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
				      struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int ret;

	if (ringbuf->obj)
		return 0;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ringbuf->size);
	if (obj == NULL)
		return -ENOMEM;

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ringbuf->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
		ret = -EINVAL;
		goto err_unpin;
	}

	ringbuf->obj = obj;
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ringbuf->size = 32 * PAGE_SIZE;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			goto error;
	}

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
		goto error;
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(dev) || IS_845G(dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;

	ret = ring->init(ring);
	if (ret)
		goto error;

	return 0;

error:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;

	if (!intel_ring_initialized(ring))
		return;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	intel_destroy_ringbuffer_obj(ringbuf);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);

	i915_cmd_parser_fini_ring(ring);

	kfree(ringbuf);
	ring->buffer = NULL;
}

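/*
 * Try to free ring space without touching the hardware: walk the list of
 * outstanding requests oldest-first, find the first one whose retirement
 * would leave at least @n bytes available, and wait for its seqno. Only if
 * no such request exists does the caller fall back to busy-waiting on the
 * hardware HEAD (ring_wait_for_space below).
 */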
static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = ring_space(ringbuf);
		if (ringbuf->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = ring_space(ringbuf);
	return 0;
}

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	trace_i915_ring_wait_begin(ring);
	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = ring_space(ringbuf);
		if (ringbuf->space >= n) {
			ret = 0;
			break;
		}

#if 0
		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}
#else
		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

		msleep(1);

#if 0
		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);
	trace_i915_ring_wait_end(ring);
	return ret;
}

static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
	uint32_t __iomem *virt;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail);
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = ring_space(ringbuf);

	return 0;
}

int intel_ring_idle(struct intel_engine_cs *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

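/*
 * Lazy request bookkeeping: a request structure is preallocated and a seqno
 * reserved (the outstanding lazy request, "olr") before any commands are
 * emitted, so that completing the request later does not require a memory
 * allocation.
 */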
static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), M_DRM, M_WAITOK);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int __intel_ring_prepare(struct intel_engine_cs *ring,
				int bytes)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int intel_ring_begin(struct intel_engine_cs *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->buffer->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(ring, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
1948 */ 1949 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1950 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1951 } 1952 1953 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, 1954 u32 invalidate, u32 flush) 1955 { 1956 uint32_t cmd; 1957 int ret; 1958 1959 ret = intel_ring_begin(ring, 4); 1960 if (ret) 1961 return ret; 1962 1963 cmd = MI_FLUSH_DW; 1964 if (INTEL_INFO(ring->dev)->gen >= 8) 1965 cmd += 1; 1966 /* 1967 * Bspec vol 1c.5 - video engine command streamer: 1968 * "If ENABLED, all TLBs will be invalidated once the flush 1969 * operation is complete. This bit is only valid when the 1970 * Post-Sync Operation field is a value of 1h or 3h." 1971 */ 1972 if (invalidate & I915_GEM_GPU_DOMAINS) 1973 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | 1974 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1975 intel_ring_emit(ring, cmd); 1976 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1977 if (INTEL_INFO(ring->dev)->gen >= 8) { 1978 intel_ring_emit(ring, 0); /* upper addr */ 1979 intel_ring_emit(ring, 0); /* value */ 1980 } else { 1981 intel_ring_emit(ring, 0); 1982 intel_ring_emit(ring, MI_NOOP); 1983 } 1984 intel_ring_advance(ring); 1985 return 0; 1986 } 1987 1988 static int 1989 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1990 u64 offset, u32 len, 1991 unsigned flags) 1992 { 1993 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1994 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && 1995 !(flags & I915_DISPATCH_SECURE); 1996 int ret; 1997 1998 ret = intel_ring_begin(ring, 4); 1999 if (ret) 2000 return ret; 2001 2002 /* FIXME(BDW): Address space and security selectors. */ 2003 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); 2004 intel_ring_emit(ring, lower_32_bits(offset)); 2005 intel_ring_emit(ring, upper_32_bits(offset)); 2006 intel_ring_emit(ring, MI_NOOP); 2007 intel_ring_advance(ring); 2008 2009 return 0; 2010 } 2011 2012 static int 2013 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 2014 u64 offset, u32 len, 2015 unsigned flags) 2016 { 2017 int ret; 2018 2019 ret = intel_ring_begin(ring, 2); 2020 if (ret) 2021 return ret; 2022 2023 intel_ring_emit(ring, 2024 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 2025 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 2026 /* bit0-7 is the length on GEN6+ */ 2027 intel_ring_emit(ring, offset); 2028 intel_ring_advance(ring); 2029 2030 return 0; 2031 } 2032 2033 static int 2034 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 2035 u64 offset, u32 len, 2036 unsigned flags) 2037 { 2038 int ret; 2039 2040 ret = intel_ring_begin(ring, 2); 2041 if (ret) 2042 return ret; 2043 2044 intel_ring_emit(ring, 2045 MI_BATCH_BUFFER_START | 2046 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 2047 /* bit0-7 is the length on GEN6+ */ 2048 intel_ring_emit(ring, offset); 2049 intel_ring_advance(ring); 2050 2051 return 0; 2052 } 2053 2054 /* Blitter support (SandyBridge+) */ 2055 2056 static int gen6_ring_flush(struct intel_engine_cs *ring, 2057 u32 invalidate, u32 flush) 2058 { 2059 struct drm_device *dev = ring->dev; 2060 uint32_t cmd; 2061 int ret; 2062 2063 ret = intel_ring_begin(ring, 4); 2064 if (ret) 2065 return ret; 2066 2067 cmd = MI_FLUSH_DW; 2068 if (INTEL_INFO(ring->dev)->gen >= 8) 2069 cmd += 1; 2070 /* 2071 * Bspec vol 1c.3 - blitter engine command streamer: 2072 * "If ENABLED, all TLBs will be invalidated once the flush 2073 * operation is complete. 
This bit is only valid when the 2074 * Post-Sync Operation field is a value of 1h or 3h." 2075 */ 2076 if (invalidate & I915_GEM_DOMAIN_RENDER) 2077 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | 2078 MI_FLUSH_DW_OP_STOREDW; 2079 intel_ring_emit(ring, cmd); 2080 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2081 if (INTEL_INFO(ring->dev)->gen >= 8) { 2082 intel_ring_emit(ring, 0); /* upper addr */ 2083 intel_ring_emit(ring, 0); /* value */ 2084 } else { 2085 intel_ring_emit(ring, 0); 2086 intel_ring_emit(ring, MI_NOOP); 2087 } 2088 intel_ring_advance(ring); 2089 2090 if (IS_GEN7(dev) && !invalidate && flush) 2091 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); 2092 2093 return 0; 2094 } 2095 2096 int intel_init_render_ring_buffer(struct drm_device *dev) 2097 { 2098 struct drm_i915_private *dev_priv = dev->dev_private; 2099 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 2100 struct drm_i915_gem_object *obj; 2101 int ret; 2102 2103 ring->name = "render ring"; 2104 ring->id = RCS; 2105 ring->mmio_base = RENDER_RING_BASE; 2106 2107 if (INTEL_INFO(dev)->gen >= 8) { 2108 if (i915_semaphore_is_enabled(dev)) { 2109 obj = i915_gem_alloc_object(dev, 4096); 2110 if (obj == NULL) { 2111 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2112 i915.semaphores = 0; 2113 } else { 2114 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2115 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); 2116 if (ret != 0) { 2117 drm_gem_object_unreference(&obj->base); 2118 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); 2119 i915.semaphores = 0; 2120 } else 2121 dev_priv->semaphore_obj = obj; 2122 } 2123 } 2124 ring->add_request = gen6_add_request; 2125 ring->flush = gen8_render_ring_flush; 2126 ring->irq_get = gen8_ring_get_irq; 2127 ring->irq_put = gen8_ring_put_irq; 2128 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2129 ring->get_seqno = gen6_ring_get_seqno; 2130 ring->set_seqno = ring_set_seqno; 2131 if (i915_semaphore_is_enabled(dev)) { 2132 WARN_ON(!dev_priv->semaphore_obj); 2133 ring->semaphore.sync_to = gen8_ring_sync; 2134 ring->semaphore.signal = gen8_rcs_signal; 2135 GEN8_RING_SEMAPHORE_INIT; 2136 } 2137 } else if (INTEL_INFO(dev)->gen >= 6) { 2138 ring->add_request = gen6_add_request; 2139 ring->flush = gen7_render_ring_flush; 2140 if (INTEL_INFO(dev)->gen == 6) 2141 ring->flush = gen6_render_ring_flush; 2142 ring->irq_get = gen6_ring_get_irq; 2143 ring->irq_put = gen6_ring_put_irq; 2144 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2145 ring->get_seqno = gen6_ring_get_seqno; 2146 ring->set_seqno = ring_set_seqno; 2147 if (i915_semaphore_is_enabled(dev)) { 2148 ring->semaphore.sync_to = gen6_ring_sync; 2149 ring->semaphore.signal = gen6_signal; 2150 /* 2151 * The current semaphore is only applied on pre-gen8 2152 * platform. And there is no VCS2 ring on the pre-gen8 2153 * platform. So the semaphore between RCS and VCS2 is 2154 * initialized as INVALID. Gen8 will initialize the 2155 * sema between VCS2 and RCS later. 
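	 * (The gen8 branch above relies on the semaphore bo and
	 * GEN8_RING_SEMAPHORE_INIT instead of these mailbox registers.)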
2156 */ 2157 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; 2158 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; 2159 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; 2160 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; 2161 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2162 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; 2163 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; 2164 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; 2165 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2166 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2167 } 2168 } else if (IS_GEN5(dev)) { 2169 ring->add_request = pc_render_add_request; 2170 ring->flush = gen4_render_ring_flush; 2171 ring->get_seqno = pc_render_get_seqno; 2172 ring->set_seqno = pc_render_set_seqno; 2173 ring->irq_get = gen5_ring_get_irq; 2174 ring->irq_put = gen5_ring_put_irq; 2175 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | 2176 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 2177 } else { 2178 ring->add_request = i9xx_add_request; 2179 if (INTEL_INFO(dev)->gen < 4) 2180 ring->flush = gen2_render_ring_flush; 2181 else 2182 ring->flush = gen4_render_ring_flush; 2183 ring->get_seqno = ring_get_seqno; 2184 ring->set_seqno = ring_set_seqno; 2185 if (IS_GEN2(dev)) { 2186 ring->irq_get = i8xx_ring_get_irq; 2187 ring->irq_put = i8xx_ring_put_irq; 2188 } else { 2189 ring->irq_get = i9xx_ring_get_irq; 2190 ring->irq_put = i9xx_ring_put_irq; 2191 } 2192 ring->irq_enable_mask = I915_USER_INTERRUPT; 2193 } 2194 ring->write_tail = ring_write_tail; 2195 2196 if (IS_HASWELL(dev)) 2197 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2198 else if (IS_GEN8(dev)) 2199 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2200 else if (INTEL_INFO(dev)->gen >= 6) 2201 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2202 else if (INTEL_INFO(dev)->gen >= 4) 2203 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 2204 else if (IS_I830(dev) || IS_845G(dev)) 2205 ring->dispatch_execbuffer = i830_dispatch_execbuffer; 2206 else 2207 ring->dispatch_execbuffer = i915_dispatch_execbuffer; 2208 ring->init = init_render_ring; 2209 ring->cleanup = render_ring_cleanup; 2210 2211 /* Workaround batchbuffer to combat CS tlb bug. 
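	 * Only needed when HAS_BROKEN_CS_TLB is set (830/845G); the scratch
	 * bo set up below is handed to the i830 dispatch path via ring->scratch.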
*/ 2212 if (HAS_BROKEN_CS_TLB(dev)) { 2213 obj = i915_gem_alloc_object(dev, I830_WA_SIZE); 2214 if (obj == NULL) { 2215 DRM_ERROR("Failed to allocate batch bo\n"); 2216 return -ENOMEM; 2217 } 2218 2219 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 2220 if (ret != 0) { 2221 drm_gem_object_unreference(&obj->base); 2222 DRM_ERROR("Failed to pin batch bo\n"); 2223 return ret; 2224 } 2225 2226 ring->scratch.obj = obj; 2227 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 2228 } 2229 2230 return intel_init_ring_buffer(dev, ring); 2231 } 2232 2233 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) 2234 { 2235 struct drm_i915_private *dev_priv = dev->dev_private; 2236 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 2237 struct intel_ringbuffer *ringbuf = ring->buffer; 2238 int ret; 2239 2240 if (ringbuf == NULL) { 2241 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); 2242 if (!ringbuf) 2243 return -ENOMEM; 2244 ring->buffer = ringbuf; 2245 } 2246 2247 ring->name = "render ring"; 2248 ring->id = RCS; 2249 ring->mmio_base = RENDER_RING_BASE; 2250 2251 if (INTEL_INFO(dev)->gen >= 6) { 2252 /* non-kms not supported on gen6+ */ 2253 ret = -ENODEV; 2254 goto err_ringbuf; 2255 } 2256 2257 /* Note: gem is not supported on gen5/ilk without kms (the corresponding 2258 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up 2259 * the special gen5 functions. */ 2260 ring->add_request = i9xx_add_request; 2261 if (INTEL_INFO(dev)->gen < 4) 2262 ring->flush = gen2_render_ring_flush; 2263 else 2264 ring->flush = gen4_render_ring_flush; 2265 ring->get_seqno = ring_get_seqno; 2266 ring->set_seqno = ring_set_seqno; 2267 if (IS_GEN2(dev)) { 2268 ring->irq_get = i8xx_ring_get_irq; 2269 ring->irq_put = i8xx_ring_put_irq; 2270 } else { 2271 ring->irq_get = i9xx_ring_get_irq; 2272 ring->irq_put = i9xx_ring_put_irq; 2273 } 2274 ring->irq_enable_mask = I915_USER_INTERRUPT; 2275 ring->write_tail = ring_write_tail; 2276 if (INTEL_INFO(dev)->gen >= 4) 2277 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 2278 else if (IS_I830(dev) || IS_845G(dev)) 2279 ring->dispatch_execbuffer = i830_dispatch_execbuffer; 2280 else 2281 ring->dispatch_execbuffer = i915_dispatch_execbuffer; 2282 ring->init = init_render_ring; 2283 ring->cleanup = render_ring_cleanup; 2284 2285 ring->dev = dev; 2286 INIT_LIST_HEAD(&ring->active_list); 2287 INIT_LIST_HEAD(&ring->request_list); 2288 2289 ringbuf->size = size; 2290 ringbuf->effective_size = ringbuf->size; 2291 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 2292 ringbuf->effective_size -= 2 * CACHELINE_BYTES; 2293 2294 ringbuf->virtual_start = ioremap_wc(start, size); 2295 if (ringbuf->virtual_start == NULL) { 2296 DRM_ERROR("can not ioremap virtual address for" 2297 " ring buffer\n"); 2298 ret = -ENOMEM; 2299 goto err_ringbuf; 2300 } 2301 2302 if (!I915_NEED_GFX_HWS(dev)) { 2303 ret = init_phys_status_page(ring); 2304 if (ret) 2305 goto err_vstart; 2306 } 2307 2308 return 0; 2309 2310 err_vstart: 2311 pmap_unmapdev((vm_offset_t)ring->buffer->virtual_start, size); 2312 err_ringbuf: 2313 kfree(ringbuf); 2314 ring->buffer = NULL; 2315 return ret; 2316 } 2317 2318 int intel_init_bsd_ring_buffer(struct drm_device *dev) 2319 { 2320 struct drm_i915_private *dev_priv = dev->dev_private; 2321 struct intel_engine_cs *ring = &dev_priv->ring[VCS]; 2322 2323 ring->name = "bsd ring"; 2324 ring->id = VCS; 2325 2326 ring->write_tail = ring_write_tail; 2327 if (INTEL_INFO(dev)->gen >= 6) { 2328 ring->mmio_base = GEN6_BSD_RING_BASE; 2329 /* gen6 bsd needs a
special wa for tail updates */ 2330 if (IS_GEN6(dev)) 2331 ring->write_tail = gen6_bsd_ring_write_tail; 2332 ring->flush = gen6_bsd_ring_flush; 2333 ring->add_request = gen6_add_request; 2334 ring->get_seqno = gen6_ring_get_seqno; 2335 ring->set_seqno = ring_set_seqno; 2336 if (INTEL_INFO(dev)->gen >= 8) { 2337 ring->irq_enable_mask = 2338 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2339 ring->irq_get = gen8_ring_get_irq; 2340 ring->irq_put = gen8_ring_put_irq; 2341 ring->dispatch_execbuffer = 2342 gen8_ring_dispatch_execbuffer; 2343 if (i915_semaphore_is_enabled(dev)) { 2344 ring->semaphore.sync_to = gen8_ring_sync; 2345 ring->semaphore.signal = gen8_xcs_signal; 2346 GEN8_RING_SEMAPHORE_INIT; 2347 } 2348 } else { 2349 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2350 ring->irq_get = gen6_ring_get_irq; 2351 ring->irq_put = gen6_ring_put_irq; 2352 ring->dispatch_execbuffer = 2353 gen6_ring_dispatch_execbuffer; 2354 if (i915_semaphore_is_enabled(dev)) { 2355 ring->semaphore.sync_to = gen6_ring_sync; 2356 ring->semaphore.signal = gen6_signal; 2357 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; 2358 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; 2359 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; 2360 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; 2361 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2362 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; 2363 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; 2364 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; 2365 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; 2366 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2367 } 2368 } 2369 } else { 2370 ring->mmio_base = BSD_RING_BASE; 2371 ring->flush = bsd_ring_flush; 2372 ring->add_request = i9xx_add_request; 2373 ring->get_seqno = ring_get_seqno; 2374 ring->set_seqno = ring_set_seqno; 2375 if (IS_GEN5(dev)) { 2376 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2377 ring->irq_get = gen5_ring_get_irq; 2378 ring->irq_put = gen5_ring_put_irq; 2379 } else { 2380 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2381 ring->irq_get = i9xx_ring_get_irq; 2382 ring->irq_put = i9xx_ring_put_irq; 2383 } 2384 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 2385 } 2386 ring->init = init_ring_common; 2387 2388 return intel_init_ring_buffer(dev, ring); 2389 } 2390 2391 /** 2392 * Initialize the second BSD ring for Broadwell GT3. 2393 * It is noted that this only exists on Broadwell GT3. 
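 * The ring lives at GEN8_BSD2_RING_BASE and raises its user interrupt
 * through GEN8_VCS2_IRQ_SHIFT.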
2394 */ 2395 int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2396 { 2397 struct drm_i915_private *dev_priv = dev->dev_private; 2398 struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; 2399 2400 if ((INTEL_INFO(dev)->gen != 8)) { 2401 DRM_ERROR("No dual-BSD ring on non-BDW machine\n"); 2402 return -EINVAL; 2403 } 2404 2405 ring->name = "bsd2 ring"; 2406 ring->id = VCS2; 2407 2408 ring->write_tail = ring_write_tail; 2409 ring->mmio_base = GEN8_BSD2_RING_BASE; 2410 ring->flush = gen6_bsd_ring_flush; 2411 ring->add_request = gen6_add_request; 2412 ring->get_seqno = gen6_ring_get_seqno; 2413 ring->set_seqno = ring_set_seqno; 2414 ring->irq_enable_mask = 2415 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 2416 ring->irq_get = gen8_ring_get_irq; 2417 ring->irq_put = gen8_ring_put_irq; 2418 ring->dispatch_execbuffer = 2419 gen8_ring_dispatch_execbuffer; 2420 if (i915_semaphore_is_enabled(dev)) { 2421 ring->semaphore.sync_to = gen8_ring_sync; 2422 ring->semaphore.signal = gen8_xcs_signal; 2423 GEN8_RING_SEMAPHORE_INIT; 2424 } 2425 ring->init = init_ring_common; 2426 2427 return intel_init_ring_buffer(dev, ring); 2428 } 2429 2430 int intel_init_blt_ring_buffer(struct drm_device *dev) 2431 { 2432 struct drm_i915_private *dev_priv = dev->dev_private; 2433 struct intel_engine_cs *ring = &dev_priv->ring[BCS]; 2434 2435 ring->name = "blitter ring"; 2436 ring->id = BCS; 2437 2438 ring->mmio_base = BLT_RING_BASE; 2439 ring->write_tail = ring_write_tail; 2440 ring->flush = gen6_ring_flush; 2441 ring->add_request = gen6_add_request; 2442 ring->get_seqno = gen6_ring_get_seqno; 2443 ring->set_seqno = ring_set_seqno; 2444 if (INTEL_INFO(dev)->gen >= 8) { 2445 ring->irq_enable_mask = 2446 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2447 ring->irq_get = gen8_ring_get_irq; 2448 ring->irq_put = gen8_ring_put_irq; 2449 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2450 if (i915_semaphore_is_enabled(dev)) { 2451 ring->semaphore.sync_to = gen8_ring_sync; 2452 ring->semaphore.signal = gen8_xcs_signal; 2453 GEN8_RING_SEMAPHORE_INIT; 2454 } 2455 } else { 2456 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2457 ring->irq_get = gen6_ring_get_irq; 2458 ring->irq_put = gen6_ring_put_irq; 2459 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2460 if (i915_semaphore_is_enabled(dev)) { 2461 ring->semaphore.signal = gen6_signal; 2462 ring->semaphore.sync_to = gen6_ring_sync; 2463 /* 2464 * The current semaphore is only applied on pre-gen8 2465 * platform. And there is no VCS2 ring on the pre-gen8 2466 * platform. So the semaphore between BCS and VCS2 is 2467 * initialized as INVALID. Gen8 will initialize the 2468 * sema between BCS and VCS2 later. 
2469 */ 2470 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; 2471 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; 2472 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; 2473 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; 2474 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2475 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; 2476 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; 2477 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; 2478 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; 2479 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2480 } 2481 } 2482 ring->init = init_ring_common; 2483 2484 return intel_init_ring_buffer(dev, ring); 2485 } 2486 2487 int intel_init_vebox_ring_buffer(struct drm_device *dev) 2488 { 2489 struct drm_i915_private *dev_priv = dev->dev_private; 2490 struct intel_engine_cs *ring = &dev_priv->ring[VECS]; 2491 2492 ring->name = "video enhancement ring"; 2493 ring->id = VECS; 2494 2495 ring->mmio_base = VEBOX_RING_BASE; 2496 ring->write_tail = ring_write_tail; 2497 ring->flush = gen6_ring_flush; 2498 ring->add_request = gen6_add_request; 2499 ring->get_seqno = gen6_ring_get_seqno; 2500 ring->set_seqno = ring_set_seqno; 2501 2502 if (INTEL_INFO(dev)->gen >= 8) { 2503 ring->irq_enable_mask = 2504 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 2505 ring->irq_get = gen8_ring_get_irq; 2506 ring->irq_put = gen8_ring_put_irq; 2507 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2508 if (i915_semaphore_is_enabled(dev)) { 2509 ring->semaphore.sync_to = gen8_ring_sync; 2510 ring->semaphore.signal = gen8_xcs_signal; 2511 GEN8_RING_SEMAPHORE_INIT; 2512 } 2513 } else { 2514 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2515 ring->irq_get = hsw_vebox_get_irq; 2516 ring->irq_put = hsw_vebox_put_irq; 2517 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2518 if (i915_semaphore_is_enabled(dev)) { 2519 ring->semaphore.sync_to = gen6_ring_sync; 2520 ring->semaphore.signal = gen6_signal; 2521 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; 2522 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; 2523 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; 2524 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; 2525 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2526 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; 2527 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; 2528 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; 2529 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; 2530 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2531 } 2532 } 2533 ring->init = init_ring_common; 2534 2535 return intel_init_ring_buffer(dev, ring); 2536 } 2537 2538 int 2539 intel_ring_flush_all_caches(struct intel_engine_cs *ring) 2540 { 2541 int ret; 2542 2543 if (!ring->gpu_caches_dirty) 2544 return 0; 2545 2546 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); 2547 if (ret) 2548 return ret; 2549 2550 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); 2551 2552 ring->gpu_caches_dirty = false; 2553 return 0; 2554 } 2555 2556 int 2557 intel_ring_invalidate_all_caches(struct intel_engine_cs *ring) 2558 { 2559 uint32_t flush_domains; 2560 int ret; 2561 2562 flush_domains = 0; 2563 if (ring->gpu_caches_dirty) 2564 flush_domains = I915_GEM_GPU_DOMAINS; 2565 2566 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); 2567 if (ret) 2568 return ret; 2569 2570 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); 2571 2572 ring->gpu_caches_dirty = false; 2573 return 0; 2574 } 
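/*
 * Illustrative sketch only (assumed to mirror how the GEM execbuffer and
 * request code drive the two helpers above; the actual call sites live
 * outside this file):
 *
 *	ret = intel_ring_invalidate_all_caches(ring);	// before dispatching a batch
 *	...
 *	ring->gpu_caches_dirty = true;			// emitted commands dirtied the GPU caches
 *	ret = intel_ring_flush_all_caches(ring);	// when the request is added
 */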
2575 2576 void 2577 intel_stop_ring_buffer(struct intel_engine_cs *ring) 2578 { 2579 int ret; 2580 2581 if (!intel_ring_initialized(ring)) 2582 return; 2583 2584 ret = intel_ring_idle(ring); 2585 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) 2586 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 2587 ring->name, ret); 2588 2589 stop_ring(ring); 2590 } 2591