1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Ben Widawsky <ben@bwidawsk.net> 25 * Michel Thierry <michel.thierry@intel.com> 26 * Thomas Daniel <thomas.daniel@intel.com> 27 * Oscar Mateo <oscar.mateo@intel.com> 28 * 29 */ 30 31 /** 32 * DOC: Logical Rings, Logical Ring Contexts and Execlists 33 * 34 * Motivation: 35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". 36 * These expanded contexts enable a number of new abilities, especially 37 * "Execlists" (also implemented in this file). 38 * 39 * One of the main differences with the legacy HW contexts is that logical 40 * ring contexts incorporate many more things to the context's state, like 41 * PDPs or ringbuffer control registers: 42 * 43 * The reason why PDPs are included in the context is straightforward: as 44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs 45 * contained there mean you don't need to do a ppgtt->switch_mm yourself, 46 * instead, the GPU will do it for you on the context switch. 47 * 48 * But, what about the ringbuffer control registers (head, tail, etc..)? 49 * shouldn't we just need a set of those per engine command streamer? This is 50 * where the name "Logical Rings" starts to make sense: by virtualizing the 51 * rings, the engine cs shifts to a new "ring buffer" with every context 52 * switch. When you want to submit a workload to the GPU you: A) choose your 53 * context, B) find its appropriate virtualized ring, C) write commands to it 54 * and then, finally, D) tell the GPU to switch to that context. 55 * 56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch 57 * to a contexts is via a context execution list, ergo "Execlists". 58 * 59 * LRC implementation: 60 * Regarding the creation of contexts, we have: 61 * 62 * - One global default context. 63 * - One local default context for each opened fd. 64 * - One local extra context for each context create ioctl call. 65 * 66 * Now that ringbuffers belong per-context (and not per-engine, like before) 67 * and that contexts are uniquely tied to a given engine (and not reusable, 68 * like before) we need: 69 * 70 * - One ringbuffer per-engine inside each context. 71 * - One backing object per-engine inside each context. 72 * 73 * The global default context starts its life with these new objects fully 74 * allocated and populated. The local default context for each opened fd is 75 * more complex, because we don't know at creation time which engine is going 76 * to use them. To handle this, we have implemented a deferred creation of LR 77 * contexts: 78 * 79 * The local context starts its life as a hollow or blank holder, that only 80 * gets populated for a given engine once we receive an execbuffer. If later 81 * on we receive another execbuffer ioctl for the same context but a different 82 * engine, we allocate/populate a new ringbuffer and context backing object and 83 * so on. 84 * 85 * Finally, regarding local contexts created using the ioctl call: as they are 86 * only allowed with the render ring, we can allocate & populate them right 87 * away (no need to defer anything, at least for now). 88 * 89 * Execlists implementation: 90 * Execlists are the new method by which, on gen8+ hardware, workloads are 91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method). 92 * This method works as follows: 93 * 94 * When a request is committed, its commands (the BB start and any leading or 95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer 96 * for the appropriate context. The tail pointer in the hardware context is not 97 * updated at this time, but instead, kept by the driver in the ringbuffer 98 * structure. A structure representing this request is added to a request queue 99 * for the appropriate engine: this structure contains a copy of the context's 100 * tail after the request was written to the ring buffer and a pointer to the 101 * context itself. 102 * 103 * If the engine's request queue was empty before the request was added, the 104 * queue is processed immediately. Otherwise the queue will be processed during 105 * a context switch interrupt. In any case, elements on the queue will get sent 106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a 107 * globally unique 20-bits submission ID. 108 * 109 * When execution of a request completes, the GPU updates the context status 110 * buffer with a context complete event and generates a context switch interrupt. 111 * During the interrupt handling, the driver examines the events in the buffer: 112 * for each context complete event, if the announced ID matches that on the head 113 * of the request queue, then that request is retired and removed from the queue. 114 * 115 * After processing, if any requests were retired and the queue is not empty 116 * then a new execution list can be submitted. The two requests at the front of 117 * the queue are next to be submitted but since a context may not occur twice in 118 * an execution list, if subsequent requests have the same ID as the first then 119 * the two requests must be combined. This is done simply by discarding requests 120 * at the head of the queue until either only one requests is left (in which case 121 * we use a NULL second context) or the first two requests have unique IDs. 122 * 123 * By always executing the first two requests in the queue the driver ensures 124 * that the GPU is kept as busy as possible. In the case where a single context 125 * completes but a second context is still executing, the request for this second 126 * context will be at the head of the queue when we remove the first one. This 127 * request will then be resubmitted along with a new request for a different context, 128 * which will cause the hardware to continue executing the second request and queue 129 * the new request (the GPU detects the condition of a context getting preempted 130 * with the same context and optimizes the context switch flow by not doing 131 * preemption, but just sampling the new tail pointer). 132 * 133 */ 134 #include <linux/interrupt.h> 135 136 #include <drm/drmP.h> 137 #include <drm/i915_drm.h> 138 #include "i915_drv.h" 139 #include "intel_mocs.h" 140 141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) 144 145 #define RING_EXECLIST_QFULL (1 << 0x2) 146 #define RING_EXECLIST1_VALID (1 << 0x3) 147 #define RING_EXECLIST0_VALID (1 << 0x4) 148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) 149 #define RING_EXECLIST1_ACTIVE (1 << 0x11) 150 #define RING_EXECLIST0_ACTIVE (1 << 0x12) 151 152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) 153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) 154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) 155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) 156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4) 157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) 158 159 #define GEN8_CTX_STATUS_COMPLETED_MASK \ 160 (GEN8_CTX_STATUS_ACTIVE_IDLE | \ 161 GEN8_CTX_STATUS_PREEMPTED | \ 162 GEN8_CTX_STATUS_ELEMENT_SWITCH) 163 164 #define CTX_LRI_HEADER_0 0x01 165 #define CTX_CONTEXT_CONTROL 0x02 166 #define CTX_RING_HEAD 0x04 167 #define CTX_RING_TAIL 0x06 168 #define CTX_RING_BUFFER_START 0x08 169 #define CTX_RING_BUFFER_CONTROL 0x0a 170 #define CTX_BB_HEAD_U 0x0c 171 #define CTX_BB_HEAD_L 0x0e 172 #define CTX_BB_STATE 0x10 173 #define CTX_SECOND_BB_HEAD_U 0x12 174 #define CTX_SECOND_BB_HEAD_L 0x14 175 #define CTX_SECOND_BB_STATE 0x16 176 #define CTX_BB_PER_CTX_PTR 0x18 177 #define CTX_RCS_INDIRECT_CTX 0x1a 178 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c 179 #define CTX_LRI_HEADER_1 0x21 180 #define CTX_CTX_TIMESTAMP 0x22 181 #define CTX_PDP3_UDW 0x24 182 #define CTX_PDP3_LDW 0x26 183 #define CTX_PDP2_UDW 0x28 184 #define CTX_PDP2_LDW 0x2a 185 #define CTX_PDP1_UDW 0x2c 186 #define CTX_PDP1_LDW 0x2e 187 #define CTX_PDP0_UDW 0x30 188 #define CTX_PDP0_LDW 0x32 189 #define CTX_LRI_HEADER_2 0x41 190 #define CTX_R_PWR_CLK_STATE 0x42 191 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 192 193 #define GEN8_CTX_VALID (1<<0) 194 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1) 195 #define GEN8_CTX_FORCE_RESTORE (1<<2) 196 #define GEN8_CTX_L3LLC_COHERENT (1<<5) 197 #define GEN8_CTX_PRIVILEGE (1<<8) 198 199 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \ 200 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \ 201 (reg_state)[(pos)+1] = (val); \ 202 } while (0) 203 204 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ 205 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ 206 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ 207 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ 208 } while (0) 209 210 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ 211 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ 212 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ 213 } while (0) 214 215 enum { 216 FAULT_AND_HANG = 0, 217 FAULT_AND_HALT, /* Debug only */ 218 FAULT_AND_STREAM, 219 FAULT_AND_CONTINUE /* Unsupported */ 220 }; 221 #define GEN8_CTX_ID_SHIFT 32 222 #define GEN8_CTX_ID_WIDTH 21 223 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 224 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 225 226 /* Typical size of the average request (2 pipecontrols and a MI_BB) */ 227 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 228 229 #define WA_TAIL_DWORDS 2 230 231 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, 232 struct intel_engine_cs *engine); 233 static int intel_lr_context_pin(struct i915_gem_context *ctx, 234 struct intel_engine_cs *engine); 235 static void execlists_init_reg_state(u32 *reg_state, 236 struct i915_gem_context *ctx, 237 struct intel_engine_cs *engine, 238 struct intel_ring *ring); 239 240 /** 241 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 242 * @dev_priv: i915 device private 243 * @enable_execlists: value of i915.enable_execlists module parameter. 244 * 245 * Only certain platforms support Execlists (the prerequisites being 246 * support for Logical Ring Contexts and Aliasing PPGTT or better). 247 * 248 * Return: 1 if Execlists is supported and has to be enabled. 249 */ 250 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) 251 { 252 /* On platforms with execlist available, vGPU will only 253 * support execlist mode, no ring buffer mode. 254 */ 255 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) 256 return 1; 257 258 if (INTEL_GEN(dev_priv) >= 9) 259 return 1; 260 261 if (enable_execlists == 0) 262 return 0; 263 264 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && 265 USES_PPGTT(dev_priv) && 266 i915.use_mmio_flip >= 0) 267 return 1; 268 269 return 0; 270 } 271 272 static void 273 logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 274 { 275 struct drm_i915_private *dev_priv = engine->i915; 276 277 engine->disable_lite_restore_wa = 278 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && 279 (engine->id == VCS || engine->id == VCS2); 280 281 engine->ctx_desc_template = GEN8_CTX_VALID; 282 if (IS_GEN8(dev_priv)) 283 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 284 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 285 286 /* TODO: WaDisableLiteRestore when we start using semaphore 287 * signalling between Command Streamers */ 288 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */ 289 290 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 291 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ 292 if (engine->disable_lite_restore_wa) 293 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; 294 } 295 296 /** 297 * intel_lr_context_descriptor_update() - calculate & cache the descriptor 298 * descriptor for a pinned context 299 * @ctx: Context to work on 300 * @engine: Engine the descriptor will be used with 301 * 302 * The context descriptor encodes various attributes of a context, 303 * including its GTT address and some flags. Because it's fairly 304 * expensive to calculate, we'll just do it once and cache the result, 305 * which remains valid until the context is unpinned. 306 * 307 * This is what a descriptor looks like, from LSB to MSB:: 308 * 309 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 310 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 311 * bits 32-52: ctx ID, a globally unique tag 312 * bits 53-54: mbz, reserved for use by hardware 313 * bits 55-63: group ID, currently unused and set to 0 314 */ 315 static void 316 intel_lr_context_descriptor_update(struct i915_gem_context *ctx, 317 struct intel_engine_cs *engine) 318 { 319 struct intel_context *ce = &ctx->engine[engine->id]; 320 u64 desc; 321 322 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); 323 324 desc = ctx->desc_template; /* bits 3-4 */ 325 desc |= engine->ctx_desc_template; /* bits 0-11 */ 326 desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; 327 /* bits 12-31 */ 328 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 329 330 ce->lrc_desc = desc; 331 } 332 333 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, 334 struct intel_engine_cs *engine) 335 { 336 return ctx->engine[engine->id].lrc_desc; 337 } 338 339 static inline void 340 execlists_context_status_change(struct drm_i915_gem_request *rq, 341 unsigned long status) 342 { 343 /* 344 * Only used when GVT-g is enabled now. When GVT-g is disabled, 345 * The compiler should eliminate this function as dead-code. 346 */ 347 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 348 return; 349 350 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); 351 } 352 353 static void 354 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) 355 { 356 ASSIGN_CTX_PDP(ppgtt, reg_state, 3); 357 ASSIGN_CTX_PDP(ppgtt, reg_state, 2); 358 ASSIGN_CTX_PDP(ppgtt, reg_state, 1); 359 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 360 } 361 362 static u64 execlists_update_context(struct drm_i915_gem_request *rq) 363 { 364 struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; 365 struct i915_hw_ppgtt *ppgtt = 366 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 367 u32 *reg_state = ce->lrc_reg_state; 368 369 reg_state[CTX_RING_TAIL+1] = rq->tail; 370 371 /* True 32b PPGTT with dynamic page allocation: update PDP 372 * registers and point the unallocated PDPs to scratch page. 373 * PML4 is allocated during ppgtt init, so this is not needed 374 * in 48-bit mode. 375 */ 376 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 377 execlists_update_context_pdps(ppgtt, reg_state); 378 379 return ce->lrc_desc; 380 } 381 382 static void execlists_submit_ports(struct intel_engine_cs *engine) 383 { 384 struct drm_i915_private *dev_priv = engine->i915; 385 struct execlist_port *port = engine->execlist_port; 386 u32 __iomem *elsp = 387 dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine)); 388 u64 desc[2]; 389 390 if (!port[0].count) 391 execlists_context_status_change(port[0].request, 392 INTEL_CONTEXT_SCHEDULE_IN); 393 desc[0] = execlists_update_context(port[0].request); 394 engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */ 395 396 if (port[1].request) { 397 GEM_BUG_ON(port[1].count); 398 execlists_context_status_change(port[1].request, 399 INTEL_CONTEXT_SCHEDULE_IN); 400 desc[1] = execlists_update_context(port[1].request); 401 port[1].count = 1; 402 } else { 403 desc[1] = 0; 404 } 405 GEM_BUG_ON(desc[0] == desc[1]); 406 407 /* You must always write both descriptors in the order below. */ 408 writel(upper_32_bits(desc[1]), elsp); 409 writel(lower_32_bits(desc[1]), elsp); 410 411 writel(upper_32_bits(desc[0]), elsp); 412 /* The context is automatically loaded after the following */ 413 writel(lower_32_bits(desc[0]), elsp); 414 } 415 416 static bool ctx_single_port_submission(const struct i915_gem_context *ctx) 417 { 418 return (IS_ENABLED(CONFIG_DRM_I915_GVT) && 419 ctx->execlists_force_single_submission); 420 } 421 422 static bool can_merge_ctx(const struct i915_gem_context *prev, 423 const struct i915_gem_context *next) 424 { 425 if (prev != next) 426 return false; 427 428 if (ctx_single_port_submission(prev)) 429 return false; 430 431 return true; 432 } 433 434 static void execlists_dequeue(struct intel_engine_cs *engine) 435 { 436 struct drm_i915_gem_request *last; 437 struct execlist_port *port = engine->execlist_port; 438 unsigned long flags; 439 struct rb_node *rb; 440 bool submit = false; 441 442 last = port->request; 443 if (last) 444 /* WaIdleLiteRestore:bdw,skl 445 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL 446 * as we resubmit the request. See gen8_emit_breadcrumb() 447 * for where we prepare the padding after the end of the 448 * request. 449 */ 450 last->tail = last->wa_tail; 451 452 GEM_BUG_ON(port[1].request); 453 454 /* Hardware submission is through 2 ports. Conceptually each port 455 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 456 * static for a context, and unique to each, so we only execute 457 * requests belonging to a single context from each ring. RING_HEAD 458 * is maintained by the CS in the context image, it marks the place 459 * where it got up to last time, and through RING_TAIL we tell the CS 460 * where we want to execute up to this time. 461 * 462 * In this list the requests are in order of execution. Consecutive 463 * requests from the same context are adjacent in the ringbuffer. We 464 * can combine these requests into a single RING_TAIL update: 465 * 466 * RING_HEAD...req1...req2 467 * ^- RING_TAIL 468 * since to execute req2 the CS must first execute req1. 469 * 470 * Our goal then is to point each port to the end of a consecutive 471 * sequence of requests as being the most optimal (fewest wake ups 472 * and context switches) submission. 473 */ 474 475 spin_lock_irqsave(&engine->timeline->lock, flags); 476 rb = engine->execlist_first; 477 while (rb) { 478 struct drm_i915_gem_request *cursor = 479 rb_entry(rb, typeof(*cursor), priotree.node); 480 481 /* Can we combine this request with the current port? It has to 482 * be the same context/ringbuffer and not have any exceptions 483 * (e.g. GVT saying never to combine contexts). 484 * 485 * If we can combine the requests, we can execute both by 486 * updating the RING_TAIL to point to the end of the second 487 * request, and so we never need to tell the hardware about 488 * the first. 489 */ 490 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) { 491 /* If we are on the second port and cannot combine 492 * this request with the last, then we are done. 493 */ 494 if (port != engine->execlist_port) 495 break; 496 497 /* If GVT overrides us we only ever submit port[0], 498 * leaving port[1] empty. Note that we also have 499 * to be careful that we don't queue the same 500 * context (even though a different request) to 501 * the second port. 502 */ 503 if (ctx_single_port_submission(last->ctx) || 504 ctx_single_port_submission(cursor->ctx)) 505 break; 506 507 GEM_BUG_ON(last->ctx == cursor->ctx); 508 509 i915_gem_request_assign(&port->request, last); 510 port++; 511 } 512 513 rb = rb_next(rb); 514 rb_erase(&cursor->priotree.node, &engine->execlist_queue); 515 RB_CLEAR_NODE(&cursor->priotree.node); 516 cursor->priotree.priority = INT_MAX; 517 518 /* We keep the previous context alive until we retire the 519 * following request. This ensures that any the context object 520 * is still pinned for any residual writes the HW makes into it 521 * on the context switch into the next object following the 522 * breadcrumb. Otherwise, we may retire the context too early. 523 */ 524 cursor->previous_context = engine->last_context; 525 engine->last_context = cursor->ctx; 526 527 __i915_gem_request_submit(cursor); 528 last = cursor; 529 submit = true; 530 } 531 if (submit) { 532 i915_gem_request_assign(&port->request, last); 533 engine->execlist_first = rb; 534 } 535 spin_unlock_irqrestore(&engine->timeline->lock, flags); 536 537 if (submit) 538 execlists_submit_ports(engine); 539 } 540 541 static bool execlists_elsp_idle(struct intel_engine_cs *engine) 542 { 543 return !engine->execlist_port[0].request; 544 } 545 546 /** 547 * intel_execlists_idle() - Determine if all engine submission ports are idle 548 * @dev_priv: i915 device private 549 * 550 * Return true if there are no requests pending on any of the submission ports 551 * of any engines. 552 */ 553 bool intel_execlists_idle(struct drm_i915_private *dev_priv) 554 { 555 struct intel_engine_cs *engine; 556 enum intel_engine_id id; 557 558 if (!i915.enable_execlists) 559 return true; 560 561 for_each_engine(engine, dev_priv, id) 562 if (!execlists_elsp_idle(engine)) 563 return false; 564 565 return true; 566 } 567 568 static bool execlists_elsp_ready(struct intel_engine_cs *engine) 569 { 570 int port; 571 572 port = 1; /* wait for a free slot */ 573 if (engine->disable_lite_restore_wa || engine->preempt_wa) 574 port = 0; /* wait for GPU to be idle before continuing */ 575 576 return !engine->execlist_port[port].request; 577 } 578 579 /* 580 * Check the unread Context Status Buffers and manage the submission of new 581 * contexts to the ELSP accordingly. 582 */ 583 static void intel_lrc_irq_handler(unsigned long data) 584 { 585 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 586 struct execlist_port *port = engine->execlist_port; 587 struct drm_i915_private *dev_priv = engine->i915; 588 589 intel_uncore_forcewake_get(dev_priv, engine->fw_domains); 590 591 if (!execlists_elsp_idle(engine)) { 592 u32 __iomem *csb_mmio = 593 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); 594 u32 __iomem *buf = 595 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); 596 unsigned int csb, head, tail; 597 598 csb = readl(csb_mmio); 599 head = GEN8_CSB_READ_PTR(csb); 600 tail = GEN8_CSB_WRITE_PTR(csb); 601 if (tail < head) 602 tail += GEN8_CSB_ENTRIES; 603 while (head < tail) { 604 unsigned int idx = ++head % GEN8_CSB_ENTRIES; 605 unsigned int status = readl(buf + 2 * idx); 606 607 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) 608 continue; 609 610 GEM_BUG_ON(port[0].count == 0); 611 if (--port[0].count == 0) { 612 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); 613 execlists_context_status_change(port[0].request, 614 INTEL_CONTEXT_SCHEDULE_OUT); 615 616 i915_gem_request_put(port[0].request); 617 port[0] = port[1]; 618 memset(&port[1], 0, sizeof(port[1])); 619 620 engine->preempt_wa = false; 621 } 622 623 GEM_BUG_ON(port[0].count == 0 && 624 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); 625 } 626 627 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 628 GEN8_CSB_WRITE_PTR(csb) << 8), 629 csb_mmio); 630 } 631 632 if (execlists_elsp_ready(engine)) 633 execlists_dequeue(engine); 634 635 intel_uncore_forcewake_put(dev_priv, engine->fw_domains); 636 } 637 638 static bool insert_request(struct i915_priotree *pt, struct rb_root *root) 639 { 640 struct rb_node **p, *rb; 641 bool first = true; 642 643 /* most positive priority is scheduled first, equal priorities fifo */ 644 rb = NULL; 645 p = &root->rb_node; 646 while (*p) { 647 struct i915_priotree *pos; 648 649 rb = *p; 650 pos = rb_entry(rb, typeof(*pos), node); 651 if (pt->priority > pos->priority) { 652 p = &rb->rb_left; 653 } else { 654 p = &rb->rb_right; 655 first = false; 656 } 657 } 658 rb_link_node(&pt->node, rb, p); 659 rb_insert_color(&pt->node, root); 660 661 return first; 662 } 663 664 static void execlists_submit_request(struct drm_i915_gem_request *request) 665 { 666 struct intel_engine_cs *engine = request->engine; 667 unsigned long flags; 668 669 /* Will be called from irq-context when using foreign fences. */ 670 spin_lock_irqsave(&engine->timeline->lock, flags); 671 672 if (insert_request(&request->priotree, &engine->execlist_queue)) 673 engine->execlist_first = &request->priotree.node; 674 if (execlists_elsp_idle(engine)) 675 tasklet_hi_schedule(&engine->irq_tasklet); 676 677 spin_unlock_irqrestore(&engine->timeline->lock, flags); 678 } 679 680 static struct intel_engine_cs * 681 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) 682 { 683 struct intel_engine_cs *engine; 684 685 engine = container_of(pt, 686 struct drm_i915_gem_request, 687 priotree)->engine; 688 if (engine != locked) { 689 if (locked) 690 spin_unlock_irq(&locked->timeline->lock); 691 spin_lock_irq(&engine->timeline->lock); 692 } 693 694 return engine; 695 } 696 697 static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 698 { 699 static DEFINE_MUTEX(lock); 700 struct intel_engine_cs *engine = NULL; 701 struct i915_dependency *dep, *p; 702 struct i915_dependency stack; 703 LINUX_LIST_HEAD(dfs); 704 705 if (prio <= READ_ONCE(request->priotree.priority)) 706 return; 707 708 /* Need global lock to use the temporary link inside i915_dependency */ 709 mutex_lock(&lock); 710 711 stack.signaler = &request->priotree; 712 list_add(&stack.dfs_link, &dfs); 713 714 /* Recursively bump all dependent priorities to match the new request. 715 * 716 * A naive approach would be to use recursion: 717 * static void update_priorities(struct i915_priotree *pt, prio) { 718 * list_for_each_entry(dep, &pt->signalers_list, signal_link) 719 * update_priorities(dep->signal, prio) 720 * insert_request(pt); 721 * } 722 * but that may have unlimited recursion depth and so runs a very 723 * real risk of overunning the kernel stack. Instead, we build 724 * a flat list of all dependencies starting with the current request. 725 * As we walk the list of dependencies, we add all of its dependencies 726 * to the end of the list (this may include an already visited 727 * request) and continue to walk onwards onto the new dependencies. The 728 * end result is a topological list of requests in reverse order, the 729 * last element in the list is the request we must execute first. 730 */ 731 list_for_each_entry_safe(dep, p, &dfs, dfs_link) { 732 struct i915_priotree *pt = dep->signaler; 733 734 list_for_each_entry(p, &pt->signalers_list, signal_link) 735 if (prio > READ_ONCE(p->signaler->priority)) 736 list_move_tail(&p->dfs_link, &dfs); 737 738 p = list_next_entry(dep, dfs_link); 739 if (!RB_EMPTY_NODE(&pt->node)) 740 continue; 741 742 engine = pt_lock_engine(pt, engine); 743 744 /* If it is not already in the rbtree, we can update the 745 * priority inplace and skip over it (and its dependencies) 746 * if it is referenced *again* as we descend the dfs. 747 */ 748 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) { 749 pt->priority = prio; 750 list_del_init(&dep->dfs_link); 751 } 752 } 753 754 /* Fifo and depth-first replacement ensure our deps execute before us */ 755 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { 756 struct i915_priotree *pt = dep->signaler; 757 758 INIT_LIST_HEAD(&dep->dfs_link); 759 760 engine = pt_lock_engine(pt, engine); 761 762 if (prio <= pt->priority) 763 continue; 764 765 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node)); 766 767 pt->priority = prio; 768 rb_erase(&pt->node, &engine->execlist_queue); 769 if (insert_request(pt, &engine->execlist_queue)) 770 engine->execlist_first = &pt->node; 771 } 772 773 if (engine) 774 spin_unlock_irq(&engine->timeline->lock); 775 776 mutex_unlock(&lock); 777 778 /* XXX Do we need to preempt to make room for us and our deps? */ 779 } 780 781 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 782 { 783 struct intel_engine_cs *engine = request->engine; 784 struct intel_context *ce = &request->ctx->engine[engine->id]; 785 int ret; 786 787 /* Flush enough space to reduce the likelihood of waiting after 788 * we start building the request - in which case we will just 789 * have to repeat work. 790 */ 791 request->reserved_space += EXECLISTS_REQUEST_SIZE; 792 793 if (!ce->state) { 794 ret = execlists_context_deferred_alloc(request->ctx, engine); 795 if (ret) 796 return ret; 797 } 798 799 request->ring = ce->ring; 800 801 ret = intel_lr_context_pin(request->ctx, engine); 802 if (ret) 803 return ret; 804 805 if (i915.enable_guc_submission) { 806 /* 807 * Check that the GuC has space for the request before 808 * going any further, as the i915_add_request() call 809 * later on mustn't fail ... 810 */ 811 ret = i915_guc_wq_reserve(request); 812 if (ret) 813 goto err_unpin; 814 } 815 816 ret = intel_ring_begin(request, 0); 817 if (ret) 818 goto err_unreserve; 819 820 if (!ce->initialised) { 821 ret = engine->init_context(request); 822 if (ret) 823 goto err_unreserve; 824 825 ce->initialised = true; 826 } 827 828 /* Note that after this point, we have committed to using 829 * this request as it is being used to both track the 830 * state of engine initialisation and liveness of the 831 * golden renderstate above. Think twice before you try 832 * to cancel/unwind this request now. 833 */ 834 835 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 836 return 0; 837 838 err_unreserve: 839 if (i915.enable_guc_submission) 840 i915_guc_wq_unreserve(request); 841 err_unpin: 842 intel_lr_context_unpin(request->ctx, engine); 843 return ret; 844 } 845 846 static int intel_lr_context_pin(struct i915_gem_context *ctx, 847 struct intel_engine_cs *engine) 848 { 849 struct intel_context *ce = &ctx->engine[engine->id]; 850 void *vaddr; 851 int ret; 852 853 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 854 855 if (ce->pin_count++) 856 return 0; 857 858 ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, 859 PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL); 860 if (ret) 861 goto err; 862 863 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); 864 if (IS_ERR(vaddr)) { 865 ret = PTR_ERR(vaddr); 866 goto unpin_vma; 867 } 868 869 ret = intel_ring_pin(ce->ring); 870 if (ret) 871 goto unpin_map; 872 873 intel_lr_context_descriptor_update(ctx, engine); 874 875 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 876 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = 877 i915_ggtt_offset(ce->ring->vma); 878 879 ce->state->obj->mm.dirty = true; 880 881 /* Invalidate GuC TLB. */ 882 if (i915.enable_guc_submission) { 883 struct drm_i915_private *dev_priv = ctx->i915; 884 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 885 } 886 887 i915_gem_context_get(ctx); 888 return 0; 889 890 unpin_map: 891 i915_gem_object_unpin_map(ce->state->obj); 892 unpin_vma: 893 __i915_vma_unpin(ce->state); 894 err: 895 ce->pin_count = 0; 896 return ret; 897 } 898 899 void intel_lr_context_unpin(struct i915_gem_context *ctx, 900 struct intel_engine_cs *engine) 901 { 902 struct intel_context *ce = &ctx->engine[engine->id]; 903 904 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 905 GEM_BUG_ON(ce->pin_count == 0); 906 907 if (--ce->pin_count) 908 return; 909 910 intel_ring_unpin(ce->ring); 911 912 i915_gem_object_unpin_map(ce->state->obj); 913 i915_vma_unpin(ce->state); 914 915 i915_gem_context_put(ctx); 916 } 917 918 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 919 { 920 int ret, i; 921 struct intel_ring *ring = req->ring; 922 struct i915_workarounds *w = &req->i915->workarounds; 923 924 if (w->count == 0) 925 return 0; 926 927 ret = req->engine->emit_flush(req, EMIT_BARRIER); 928 if (ret) 929 return ret; 930 931 ret = intel_ring_begin(req, w->count * 2 + 2); 932 if (ret) 933 return ret; 934 935 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); 936 for (i = 0; i < w->count; i++) { 937 intel_ring_emit_reg(ring, w->reg[i].addr); 938 intel_ring_emit(ring, w->reg[i].value); 939 } 940 intel_ring_emit(ring, MI_NOOP); 941 942 intel_ring_advance(ring); 943 944 ret = req->engine->emit_flush(req, EMIT_BARRIER); 945 if (ret) 946 return ret; 947 948 return 0; 949 } 950 951 #define wa_ctx_emit(batch, index, cmd) \ 952 do { \ 953 int __index = (index)++; \ 954 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \ 955 return -ENOSPC; \ 956 } \ 957 batch[__index] = (cmd); \ 958 } while (0) 959 960 #define wa_ctx_emit_reg(batch, index, reg) \ 961 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg)) 962 963 /* 964 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after 965 * PIPE_CONTROL instruction. This is required for the flush to happen correctly 966 * but there is a slight complication as this is applied in WA batch where the 967 * values are only initialized once so we cannot take register value at the 968 * beginning and reuse it further; hence we save its value to memory, upload a 969 * constant value with bit21 set and then we restore it back with the saved value. 970 * To simplify the WA, a constant value is formed by using the default value 971 * of this register. This shouldn't be a problem because we are only modifying 972 * it for a short period and this batch in non-premptible. We can ofcourse 973 * use additional instructions that read the actual value of the register 974 * at that time and set our bit of interest but it makes the WA complicated. 975 * 976 * This WA is also required for Gen9 so extracting as a function avoids 977 * code duplication. 978 */ 979 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, 980 uint32_t *batch, 981 uint32_t index) 982 { 983 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 984 985 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 986 MI_SRM_LRM_GLOBAL_GTT)); 987 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 988 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); 989 wa_ctx_emit(batch, index, 0); 990 991 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 992 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 993 wa_ctx_emit(batch, index, l3sqc4_flush); 994 995 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 996 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL | 997 PIPE_CONTROL_DC_FLUSH_ENABLE)); 998 wa_ctx_emit(batch, index, 0); 999 wa_ctx_emit(batch, index, 0); 1000 wa_ctx_emit(batch, index, 0); 1001 wa_ctx_emit(batch, index, 0); 1002 1003 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 1004 MI_SRM_LRM_GLOBAL_GTT)); 1005 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1006 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); 1007 wa_ctx_emit(batch, index, 0); 1008 1009 return index; 1010 } 1011 1012 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx, 1013 uint32_t offset, 1014 uint32_t start_alignment) 1015 { 1016 return wa_ctx->offset = ALIGN(offset, start_alignment); 1017 } 1018 1019 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, 1020 uint32_t offset, 1021 uint32_t size_alignment) 1022 { 1023 wa_ctx->size = offset - wa_ctx->offset; 1024 1025 WARN(wa_ctx->size % size_alignment, 1026 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n", 1027 wa_ctx->size, size_alignment); 1028 return 0; 1029 } 1030 1031 /* 1032 * Typically we only have one indirect_ctx and per_ctx batch buffer which are 1033 * initialized at the beginning and shared across all contexts but this field 1034 * helps us to have multiple batches at different offsets and select them based 1035 * on a criteria. At the moment this batch always start at the beginning of the page 1036 * and at this point we don't have multiple wa_ctx batch buffers. 1037 * 1038 * The number of WA applied are not known at the beginning; we use this field 1039 * to return the no of DWORDS written. 1040 * 1041 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END 1042 * so it adds NOOPs as padding to make it cacheline aligned. 1043 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together 1044 * makes a complete batch buffer. 1045 */ 1046 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, 1047 struct i915_wa_ctx_bb *wa_ctx, 1048 uint32_t *batch, 1049 uint32_t *offset) 1050 { 1051 uint32_t scratch_addr; 1052 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1053 1054 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1055 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1056 1057 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1058 if (IS_BROADWELL(engine->i915)) { 1059 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1060 if (rc < 0) 1061 return rc; 1062 index = rc; 1063 } 1064 1065 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1066 /* Actual scratch location is at 128 bytes offset */ 1067 scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 1068 1069 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1070 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1071 PIPE_CONTROL_GLOBAL_GTT_IVB | 1072 PIPE_CONTROL_CS_STALL | 1073 PIPE_CONTROL_QW_WRITE)); 1074 wa_ctx_emit(batch, index, scratch_addr); 1075 wa_ctx_emit(batch, index, 0); 1076 wa_ctx_emit(batch, index, 0); 1077 wa_ctx_emit(batch, index, 0); 1078 1079 /* Pad to end of cacheline */ 1080 while (index % CACHELINE_DWORDS) 1081 wa_ctx_emit(batch, index, MI_NOOP); 1082 1083 /* 1084 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 1085 * execution depends on the length specified in terms of cache lines 1086 * in the register CTX_RCS_INDIRECT_CTX 1087 */ 1088 1089 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1090 } 1091 1092 /* 1093 * This batch is started immediately after indirect_ctx batch. Since we ensure 1094 * that indirect_ctx ends on a cacheline this batch is aligned automatically. 1095 * 1096 * The number of DWORDS written are returned using this field. 1097 * 1098 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding 1099 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 1100 */ 1101 static int gen8_init_perctx_bb(struct intel_engine_cs *engine, 1102 struct i915_wa_ctx_bb *wa_ctx, 1103 uint32_t *batch, 1104 uint32_t *offset) 1105 { 1106 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1107 1108 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1109 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1110 1111 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1112 1113 return wa_ctx_end(wa_ctx, *offset = index, 1); 1114 } 1115 1116 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, 1117 struct i915_wa_ctx_bb *wa_ctx, 1118 uint32_t *batch, 1119 uint32_t *offset) 1120 { 1121 int ret; 1122 struct drm_i915_private *dev_priv = engine->i915; 1123 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1124 1125 /* WaDisableCtxRestoreArbitration:bxt */ 1126 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 1127 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1128 1129 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1130 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1131 if (ret < 0) 1132 return ret; 1133 index = ret; 1134 1135 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */ 1136 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1137 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2); 1138 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE( 1139 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE)); 1140 wa_ctx_emit(batch, index, MI_NOOP); 1141 1142 /* WaClearSlmSpaceAtContextSwitch:kbl */ 1143 /* Actual scratch location is at 128 bytes offset */ 1144 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { 1145 u32 scratch_addr = 1146 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 1147 1148 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1149 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1150 PIPE_CONTROL_GLOBAL_GTT_IVB | 1151 PIPE_CONTROL_CS_STALL | 1152 PIPE_CONTROL_QW_WRITE)); 1153 wa_ctx_emit(batch, index, scratch_addr); 1154 wa_ctx_emit(batch, index, 0); 1155 wa_ctx_emit(batch, index, 0); 1156 wa_ctx_emit(batch, index, 0); 1157 } 1158 1159 /* WaMediaPoolStateCmdInWABB:bxt */ 1160 if (HAS_POOLED_EU(engine->i915)) { 1161 /* 1162 * EU pool configuration is setup along with golden context 1163 * during context initialization. This value depends on 1164 * device type (2x6 or 3x6) and needs to be updated based 1165 * on which subslice is disabled especially for 2x6 1166 * devices, however it is safe to load default 1167 * configuration of 3x6 device instead of masking off 1168 * corresponding bits because HW ignores bits of a disabled 1169 * subslice and drops down to appropriate config. Please 1170 * see render_state_setup() in i915_gem_render_state.c for 1171 * possible configurations, to avoid duplication they are 1172 * not shown here again. 1173 */ 1174 u32 eu_pool_config = 0x00777000; 1175 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE); 1176 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE); 1177 wa_ctx_emit(batch, index, eu_pool_config); 1178 wa_ctx_emit(batch, index, 0); 1179 wa_ctx_emit(batch, index, 0); 1180 wa_ctx_emit(batch, index, 0); 1181 } 1182 1183 /* Pad to end of cacheline */ 1184 while (index % CACHELINE_DWORDS) 1185 wa_ctx_emit(batch, index, MI_NOOP); 1186 1187 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1188 } 1189 1190 static int gen9_init_perctx_bb(struct intel_engine_cs *engine, 1191 struct i915_wa_ctx_bb *wa_ctx, 1192 uint32_t *batch, 1193 uint32_t *offset) 1194 { 1195 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1196 1197 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */ 1198 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { 1199 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1200 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1201 wa_ctx_emit(batch, index, 1202 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); 1203 wa_ctx_emit(batch, index, MI_NOOP); 1204 } 1205 1206 /* WaClearTdlStateAckDirtyBits:bxt */ 1207 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { 1208 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1209 1210 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1211 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1212 1213 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1); 1214 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1215 1216 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2); 1217 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1218 1219 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2); 1220 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */ 1221 wa_ctx_emit(batch, index, 0x0); 1222 wa_ctx_emit(batch, index, MI_NOOP); 1223 } 1224 1225 /* WaDisableCtxRestoreArbitration:bxt */ 1226 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) 1227 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1228 1229 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1230 1231 return wa_ctx_end(wa_ctx, *offset = index, 1); 1232 } 1233 1234 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) 1235 { 1236 struct drm_i915_gem_object *obj; 1237 struct i915_vma *vma; 1238 int err; 1239 1240 obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); 1241 if (IS_ERR(obj)) 1242 return PTR_ERR(obj); 1243 1244 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); 1245 if (IS_ERR(vma)) { 1246 err = PTR_ERR(vma); 1247 goto err; 1248 } 1249 1250 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); 1251 if (err) 1252 goto err; 1253 1254 engine->wa_ctx.vma = vma; 1255 return 0; 1256 1257 err: 1258 i915_gem_object_put(obj); 1259 return err; 1260 } 1261 1262 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) 1263 { 1264 i915_vma_unpin_and_release(&engine->wa_ctx.vma); 1265 } 1266 1267 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 1268 { 1269 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 1270 uint32_t *batch; 1271 uint32_t offset; 1272 struct page *page; 1273 int ret; 1274 1275 WARN_ON(engine->id != RCS); 1276 1277 /* update this when WA for higher Gen are added */ 1278 if (INTEL_GEN(engine->i915) > 9) { 1279 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1280 INTEL_GEN(engine->i915)); 1281 return 0; 1282 } 1283 1284 /* some WA perform writes to scratch page, ensure it is valid */ 1285 if (!engine->scratch) { 1286 DRM_ERROR("scratch page not allocated for %s\n", engine->name); 1287 return -EINVAL; 1288 } 1289 1290 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE); 1291 if (ret) { 1292 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1293 return ret; 1294 } 1295 1296 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); 1297 batch = kmap_atomic(page); 1298 offset = 0; 1299 1300 if (IS_GEN8(engine->i915)) { 1301 ret = gen8_init_indirectctx_bb(engine, 1302 &wa_ctx->indirect_ctx, 1303 batch, 1304 &offset); 1305 if (ret) 1306 goto out; 1307 1308 ret = gen8_init_perctx_bb(engine, 1309 &wa_ctx->per_ctx, 1310 batch, 1311 &offset); 1312 if (ret) 1313 goto out; 1314 } else if (IS_GEN9(engine->i915)) { 1315 ret = gen9_init_indirectctx_bb(engine, 1316 &wa_ctx->indirect_ctx, 1317 batch, 1318 &offset); 1319 if (ret) 1320 goto out; 1321 1322 ret = gen9_init_perctx_bb(engine, 1323 &wa_ctx->per_ctx, 1324 batch, 1325 &offset); 1326 if (ret) 1327 goto out; 1328 } 1329 1330 out: 1331 kunmap_atomic(batch); 1332 if (ret) 1333 lrc_destroy_wa_ctx_obj(engine); 1334 1335 return ret; 1336 } 1337 1338 static void lrc_init_hws(struct intel_engine_cs *engine) 1339 { 1340 struct drm_i915_private *dev_priv = engine->i915; 1341 1342 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1343 engine->status_page.ggtt_offset); 1344 POSTING_READ(RING_HWS_PGA(engine->mmio_base)); 1345 } 1346 1347 static int gen8_init_common_ring(struct intel_engine_cs *engine) 1348 { 1349 struct drm_i915_private *dev_priv = engine->i915; 1350 int ret; 1351 1352 ret = intel_mocs_init_engine(engine); 1353 if (ret) 1354 return ret; 1355 1356 lrc_init_hws(engine); 1357 1358 intel_engine_reset_breadcrumbs(engine); 1359 1360 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); 1361 1362 I915_WRITE(RING_MODE_GEN7(engine), 1363 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1364 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1365 1366 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); 1367 1368 intel_engine_init_hangcheck(engine); 1369 1370 /* After a GPU reset, we may have requests to replay */ 1371 if (!execlists_elsp_idle(engine)) { 1372 engine->execlist_port[0].count = 0; 1373 engine->execlist_port[1].count = 0; 1374 execlists_submit_ports(engine); 1375 } 1376 1377 return 0; 1378 } 1379 1380 static int gen8_init_render_ring(struct intel_engine_cs *engine) 1381 { 1382 struct drm_i915_private *dev_priv = engine->i915; 1383 int ret; 1384 1385 ret = gen8_init_common_ring(engine); 1386 if (ret) 1387 return ret; 1388 1389 /* We need to disable the AsyncFlip performance optimisations in order 1390 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 1391 * programmed to '1' on all products. 1392 * 1393 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 1394 */ 1395 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1396 1397 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1398 1399 return init_workarounds_ring(engine); 1400 } 1401 1402 static int gen9_init_render_ring(struct intel_engine_cs *engine) 1403 { 1404 int ret; 1405 1406 ret = gen8_init_common_ring(engine); 1407 if (ret) 1408 return ret; 1409 1410 return init_workarounds_ring(engine); 1411 } 1412 1413 static void reset_common_ring(struct intel_engine_cs *engine, 1414 struct drm_i915_gem_request *request) 1415 { 1416 struct drm_i915_private *dev_priv = engine->i915; 1417 struct execlist_port *port = engine->execlist_port; 1418 struct intel_context *ce = &request->ctx->engine[engine->id]; 1419 1420 /* We want a simple context + ring to execute the breadcrumb update. 1421 * We cannot rely on the context being intact across the GPU hang, 1422 * so clear it and rebuild just what we need for the breadcrumb. 1423 * All pending requests for this context will be zapped, and any 1424 * future request will be after userspace has had the opportunity 1425 * to recreate its own state. 1426 */ 1427 execlists_init_reg_state(ce->lrc_reg_state, 1428 request->ctx, engine, ce->ring); 1429 1430 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ 1431 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = 1432 i915_ggtt_offset(ce->ring->vma); 1433 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix; 1434 1435 request->ring->head = request->postfix; 1436 request->ring->last_retired_head = -1; 1437 intel_ring_update_space(request->ring); 1438 1439 if (i915.enable_guc_submission) 1440 return; 1441 1442 /* Catch up with any missed context-switch interrupts */ 1443 I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0)); 1444 if (request->ctx != port[0].request->ctx) { 1445 i915_gem_request_put(port[0].request); 1446 port[0] = port[1]; 1447 memset(&port[1], 0, sizeof(port[1])); 1448 } 1449 1450 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1451 1452 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1453 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1454 } 1455 1456 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1457 { 1458 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1459 struct intel_ring *ring = req->ring; 1460 struct intel_engine_cs *engine = req->engine; 1461 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1462 int i, ret; 1463 1464 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1465 if (ret) 1466 return ret; 1467 1468 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds)); 1469 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { 1470 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1471 1472 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i)); 1473 intel_ring_emit(ring, upper_32_bits(pd_daddr)); 1474 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i)); 1475 intel_ring_emit(ring, lower_32_bits(pd_daddr)); 1476 } 1477 1478 intel_ring_emit(ring, MI_NOOP); 1479 intel_ring_advance(ring); 1480 1481 return 0; 1482 } 1483 1484 static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1485 u64 offset, u32 len, 1486 unsigned int dispatch_flags) 1487 { 1488 struct intel_ring *ring = req->ring; 1489 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); 1490 int ret; 1491 1492 /* Don't rely in hw updating PDPs, specially in lite-restore. 1493 * Ideally, we should set Force PD Restore in ctx descriptor, 1494 * but we can't. Force Restore would be a second option, but 1495 * it is unsafe in case of lite-restore (because the ctx is 1496 * not idle). PML4 is allocated during ppgtt init so this is 1497 * not needed in 48-bit.*/ 1498 if (req->ctx->ppgtt && 1499 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1500 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1501 !intel_vgpu_active(req->i915)) { 1502 ret = intel_logical_ring_emit_pdps(req); 1503 if (ret) 1504 return ret; 1505 } 1506 1507 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1508 } 1509 1510 ret = intel_ring_begin(req, 4); 1511 if (ret) 1512 return ret; 1513 1514 /* FIXME(BDW): Address space and security selectors. */ 1515 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | 1516 (ppgtt<<8) | 1517 (dispatch_flags & I915_DISPATCH_RS ? 1518 MI_BATCH_RESOURCE_STREAMER : 0)); 1519 intel_ring_emit(ring, lower_32_bits(offset)); 1520 intel_ring_emit(ring, upper_32_bits(offset)); 1521 intel_ring_emit(ring, MI_NOOP); 1522 intel_ring_advance(ring); 1523 1524 return 0; 1525 } 1526 1527 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) 1528 { 1529 struct drm_i915_private *dev_priv = engine->i915; 1530 I915_WRITE_IMR(engine, 1531 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 1532 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1533 } 1534 1535 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) 1536 { 1537 struct drm_i915_private *dev_priv = engine->i915; 1538 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1539 } 1540 1541 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode) 1542 { 1543 struct intel_ring *ring = request->ring; 1544 u32 cmd; 1545 int ret; 1546 1547 ret = intel_ring_begin(request, 4); 1548 if (ret) 1549 return ret; 1550 1551 cmd = MI_FLUSH_DW + 1; 1552 1553 /* We always require a command barrier so that subsequent 1554 * commands, such as breadcrumb interrupts, are strictly ordered 1555 * wrt the contents of the write cache being flushed to memory 1556 * (and thus being coherent from the CPU). 1557 */ 1558 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1559 1560 if (mode & EMIT_INVALIDATE) { 1561 cmd |= MI_INVALIDATE_TLB; 1562 if (request->engine->id == VCS) 1563 cmd |= MI_INVALIDATE_BSD; 1564 } 1565 1566 intel_ring_emit(ring, cmd); 1567 intel_ring_emit(ring, 1568 I915_GEM_HWS_SCRATCH_ADDR | 1569 MI_FLUSH_DW_USE_GTT); 1570 intel_ring_emit(ring, 0); /* upper addr */ 1571 intel_ring_emit(ring, 0); /* value */ 1572 intel_ring_advance(ring); 1573 1574 return 0; 1575 } 1576 1577 static int gen8_emit_flush_render(struct drm_i915_gem_request *request, 1578 u32 mode) 1579 { 1580 struct intel_ring *ring = request->ring; 1581 struct intel_engine_cs *engine = request->engine; 1582 u32 scratch_addr = 1583 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 1584 bool vf_flush_wa = false, dc_flush_wa = false; 1585 u32 flags = 0; 1586 int ret; 1587 int len; 1588 1589 flags |= PIPE_CONTROL_CS_STALL; 1590 1591 if (mode & EMIT_FLUSH) { 1592 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1593 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1594 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 1595 flags |= PIPE_CONTROL_FLUSH_ENABLE; 1596 } 1597 1598 if (mode & EMIT_INVALIDATE) { 1599 flags |= PIPE_CONTROL_TLB_INVALIDATE; 1600 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 1601 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 1602 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 1603 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 1604 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 1605 flags |= PIPE_CONTROL_QW_WRITE; 1606 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 1607 1608 /* 1609 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1610 * pipe control. 1611 */ 1612 if (IS_GEN9(request->i915)) 1613 vf_flush_wa = true; 1614 1615 /* WaForGAMHang:kbl */ 1616 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) 1617 dc_flush_wa = true; 1618 } 1619 1620 len = 6; 1621 1622 if (vf_flush_wa) 1623 len += 6; 1624 1625 if (dc_flush_wa) 1626 len += 12; 1627 1628 ret = intel_ring_begin(request, len); 1629 if (ret) 1630 return ret; 1631 1632 if (vf_flush_wa) { 1633 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1634 intel_ring_emit(ring, 0); 1635 intel_ring_emit(ring, 0); 1636 intel_ring_emit(ring, 0); 1637 intel_ring_emit(ring, 0); 1638 intel_ring_emit(ring, 0); 1639 } 1640 1641 if (dc_flush_wa) { 1642 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1643 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE); 1644 intel_ring_emit(ring, 0); 1645 intel_ring_emit(ring, 0); 1646 intel_ring_emit(ring, 0); 1647 intel_ring_emit(ring, 0); 1648 } 1649 1650 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1651 intel_ring_emit(ring, flags); 1652 intel_ring_emit(ring, scratch_addr); 1653 intel_ring_emit(ring, 0); 1654 intel_ring_emit(ring, 0); 1655 intel_ring_emit(ring, 0); 1656 1657 if (dc_flush_wa) { 1658 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1659 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL); 1660 intel_ring_emit(ring, 0); 1661 intel_ring_emit(ring, 0); 1662 intel_ring_emit(ring, 0); 1663 intel_ring_emit(ring, 0); 1664 } 1665 1666 intel_ring_advance(ring); 1667 1668 return 0; 1669 } 1670 1671 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) 1672 { 1673 /* 1674 * On BXT A steppings there is a HW coherency issue whereby the 1675 * MI_STORE_DATA_IMM storing the completed request's seqno 1676 * occasionally doesn't invalidate the CPU cache. Work around this by 1677 * clflushing the corresponding cacheline whenever the caller wants 1678 * the coherency to be guaranteed. Note that this cacheline is known 1679 * to be clean at this point, since we only write it in 1680 * bxt_a_set_seqno(), where we also do a clflush after the write. So 1681 * this clflush in practice becomes an invalidate operation. 1682 */ 1683 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1684 } 1685 1686 /* 1687 * Reserve space for 2 NOOPs at the end of each request to be 1688 * used as a workaround for not being allowed to do lite 1689 * restore with HEAD==TAIL (WaIdleLiteRestore). 1690 */ 1691 static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out) 1692 { 1693 *out++ = MI_NOOP; 1694 *out++ = MI_NOOP; 1695 request->wa_tail = intel_ring_offset(request->ring, out); 1696 } 1697 1698 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, 1699 u32 *out) 1700 { 1701 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 1702 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); 1703 1704 *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW; 1705 *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT; 1706 *out++ = 0; 1707 *out++ = request->global_seqno; 1708 *out++ = MI_USER_INTERRUPT; 1709 *out++ = MI_NOOP; 1710 request->tail = intel_ring_offset(request->ring, out); 1711 1712 gen8_emit_wa_tail(request, out); 1713 } 1714 1715 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; 1716 1717 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, 1718 u32 *out) 1719 { 1720 /* We're using qword write, seqno should be aligned to 8 bytes. */ 1721 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 1722 1723 /* w/a for post sync ops following a GPGPU operation we 1724 * need a prior CS_STALL, which is emitted by the flush 1725 * following the batch. 1726 */ 1727 *out++ = GFX_OP_PIPE_CONTROL(6); 1728 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB | 1729 PIPE_CONTROL_CS_STALL | 1730 PIPE_CONTROL_QW_WRITE); 1731 *out++ = intel_hws_seqno_address(request->engine); 1732 *out++ = 0; 1733 *out++ = request->global_seqno; 1734 /* We're thrashing one dword of HWS. */ 1735 *out++ = 0; 1736 *out++ = MI_USER_INTERRUPT; 1737 *out++ = MI_NOOP; 1738 request->tail = intel_ring_offset(request->ring, out); 1739 1740 gen8_emit_wa_tail(request, out); 1741 } 1742 1743 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS; 1744 1745 static int gen8_init_rcs_context(struct drm_i915_gem_request *req) 1746 { 1747 int ret; 1748 1749 ret = intel_logical_ring_workarounds_emit(req); 1750 if (ret) 1751 return ret; 1752 1753 ret = intel_rcs_context_init_mocs(req); 1754 /* 1755 * Failing to program the MOCS is non-fatal.The system will not 1756 * run at peak performance. So generate an error and carry on. 1757 */ 1758 if (ret) 1759 DRM_ERROR("MOCS failed to program: expect performance issues.\n"); 1760 1761 return i915_gem_render_state_emit(req); 1762 } 1763 1764 /** 1765 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer 1766 * @engine: Engine Command Streamer. 1767 */ 1768 void intel_logical_ring_cleanup(struct intel_engine_cs *engine) 1769 { 1770 struct drm_i915_private *dev_priv; 1771 1772 /* 1773 * Tasklet cannot be active at this point due intel_mark_active/idle 1774 * so this is just for documentation. 1775 */ 1776 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1777 tasklet_kill(&engine->irq_tasklet); 1778 1779 dev_priv = engine->i915; 1780 1781 if (engine->buffer) { 1782 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); 1783 } 1784 1785 if (engine->cleanup) 1786 engine->cleanup(engine); 1787 1788 intel_engine_cleanup_common(engine); 1789 1790 if (engine->status_page.vma) { 1791 i915_gem_object_unpin_map(engine->status_page.vma->obj); 1792 engine->status_page.vma = NULL; 1793 } 1794 intel_lr_context_unpin(dev_priv->kernel_context, engine); 1795 1796 lrc_destroy_wa_ctx_obj(engine); 1797 engine->i915 = NULL; 1798 dev_priv->engine[engine->id] = NULL; 1799 kfree(engine); 1800 } 1801 1802 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv) 1803 { 1804 struct intel_engine_cs *engine; 1805 enum intel_engine_id id; 1806 1807 for_each_engine(engine, dev_priv, id) { 1808 engine->submit_request = execlists_submit_request; 1809 engine->schedule = execlists_schedule; 1810 } 1811 } 1812 1813 static void 1814 logical_ring_default_vfuncs(struct intel_engine_cs *engine) 1815 { 1816 /* Default vfuncs which can be overriden by each engine. */ 1817 engine->init_hw = gen8_init_common_ring; 1818 engine->reset_hw = reset_common_ring; 1819 engine->emit_flush = gen8_emit_flush; 1820 engine->emit_breadcrumb = gen8_emit_breadcrumb; 1821 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; 1822 engine->submit_request = execlists_submit_request; 1823 engine->schedule = execlists_schedule; 1824 1825 engine->irq_enable = gen8_logical_ring_enable_irq; 1826 engine->irq_disable = gen8_logical_ring_disable_irq; 1827 engine->emit_bb_start = gen8_emit_bb_start; 1828 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) 1829 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1830 } 1831 1832 static inline void 1833 logical_ring_default_irqs(struct intel_engine_cs *engine) 1834 { 1835 unsigned shift = engine->irq_shift; 1836 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 1837 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1838 } 1839 1840 static int 1841 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) 1842 { 1843 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; 1844 void *hws; 1845 1846 /* The HWSP is part of the default context object in LRC mode. */ 1847 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); 1848 if (IS_ERR(hws)) 1849 return PTR_ERR(hws); 1850 1851 engine->status_page.page_addr = hws + hws_offset; 1852 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; 1853 engine->status_page.vma = vma; 1854 1855 return 0; 1856 } 1857 1858 static void 1859 logical_ring_setup(struct intel_engine_cs *engine) 1860 { 1861 struct drm_i915_private *dev_priv = engine->i915; 1862 enum forcewake_domains fw_domains; 1863 1864 intel_engine_setup_common(engine); 1865 1866 /* Intentionally left blank. */ 1867 engine->buffer = NULL; 1868 1869 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, 1870 RING_ELSP(engine), 1871 FW_REG_WRITE); 1872 1873 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 1874 RING_CONTEXT_STATUS_PTR(engine), 1875 FW_REG_READ | FW_REG_WRITE); 1876 1877 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 1878 RING_CONTEXT_STATUS_BUF_BASE(engine), 1879 FW_REG_READ); 1880 1881 engine->fw_domains = fw_domains; 1882 1883 tasklet_init(&engine->irq_tasklet, 1884 intel_lrc_irq_handler, (unsigned long)engine); 1885 1886 logical_ring_init_platform_invariants(engine); 1887 logical_ring_default_vfuncs(engine); 1888 logical_ring_default_irqs(engine); 1889 } 1890 1891 static int 1892 logical_ring_init(struct intel_engine_cs *engine) 1893 { 1894 struct i915_gem_context *dctx = engine->i915->kernel_context; 1895 int ret; 1896 1897 ret = intel_engine_init_common(engine); 1898 if (ret) 1899 goto error; 1900 1901 ret = execlists_context_deferred_alloc(dctx, engine); 1902 if (ret) 1903 goto error; 1904 1905 /* As this is the default context, always pin it */ 1906 ret = intel_lr_context_pin(dctx, engine); 1907 if (ret) { 1908 DRM_ERROR("Failed to pin context for %s: %d\n", 1909 engine->name, ret); 1910 goto error; 1911 } 1912 1913 /* And setup the hardware status page. */ 1914 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); 1915 if (ret) { 1916 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); 1917 goto error; 1918 } 1919 1920 return 0; 1921 1922 error: 1923 intel_logical_ring_cleanup(engine); 1924 return ret; 1925 } 1926 1927 int logical_render_ring_init(struct intel_engine_cs *engine) 1928 { 1929 struct drm_i915_private *dev_priv = engine->i915; 1930 int ret; 1931 1932 logical_ring_setup(engine); 1933 1934 if (HAS_L3_DPF(dev_priv)) 1935 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 1936 1937 /* Override some for render ring. */ 1938 if (INTEL_GEN(dev_priv) >= 9) 1939 engine->init_hw = gen9_init_render_ring; 1940 else 1941 engine->init_hw = gen8_init_render_ring; 1942 engine->init_context = gen8_init_rcs_context; 1943 engine->emit_flush = gen8_emit_flush_render; 1944 engine->emit_breadcrumb = gen8_emit_breadcrumb_render; 1945 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz; 1946 1947 ret = intel_engine_create_scratch(engine, 4096); 1948 if (ret) 1949 return ret; 1950 1951 ret = intel_init_workaround_bb(engine); 1952 if (ret) { 1953 /* 1954 * We continue even if we fail to initialize WA batch 1955 * because we only expect rare glitches but nothing 1956 * critical to prevent us from using GPU 1957 */ 1958 DRM_ERROR("WA batch buffer initialization failed: %d\n", 1959 ret); 1960 } 1961 1962 return logical_ring_init(engine); 1963 } 1964 1965 int logical_xcs_ring_init(struct intel_engine_cs *engine) 1966 { 1967 logical_ring_setup(engine); 1968 1969 return logical_ring_init(engine); 1970 } 1971 1972 static u32 1973 make_rpcs(struct drm_i915_private *dev_priv) 1974 { 1975 u32 rpcs = 0; 1976 1977 /* 1978 * No explicit RPCS request is needed to ensure full 1979 * slice/subslice/EU enablement prior to Gen9. 1980 */ 1981 if (INTEL_GEN(dev_priv) < 9) 1982 return 0; 1983 1984 /* 1985 * Starting in Gen9, render power gating can leave 1986 * slice/subslice/EU in a partially enabled state. We 1987 * must make an explicit request through RPCS for full 1988 * enablement. 1989 */ 1990 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { 1991 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 1992 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) << 1993 GEN8_RPCS_S_CNT_SHIFT; 1994 rpcs |= GEN8_RPCS_ENABLE; 1995 } 1996 1997 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) { 1998 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 1999 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) << 2000 GEN8_RPCS_SS_CNT_SHIFT; 2001 rpcs |= GEN8_RPCS_ENABLE; 2002 } 2003 2004 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { 2005 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 2006 GEN8_RPCS_EU_MIN_SHIFT; 2007 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 2008 GEN8_RPCS_EU_MAX_SHIFT; 2009 rpcs |= GEN8_RPCS_ENABLE; 2010 } 2011 2012 return rpcs; 2013 } 2014 2015 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) 2016 { 2017 u32 indirect_ctx_offset; 2018 2019 switch (INTEL_GEN(engine->i915)) { 2020 default: 2021 MISSING_CASE(INTEL_GEN(engine->i915)); 2022 /* fall through */ 2023 case 9: 2024 indirect_ctx_offset = 2025 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2026 break; 2027 case 8: 2028 indirect_ctx_offset = 2029 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2030 break; 2031 } 2032 2033 return indirect_ctx_offset; 2034 } 2035 2036 static void execlists_init_reg_state(u32 *reg_state, 2037 struct i915_gem_context *ctx, 2038 struct intel_engine_cs *engine, 2039 struct intel_ring *ring) 2040 { 2041 struct drm_i915_private *dev_priv = engine->i915; 2042 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; 2043 2044 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM 2045 * commands followed by (reg, value) pairs. The values we are setting here are 2046 * only for the first context restore: on a subsequent save, the GPU will 2047 * recreate this batchbuffer with new values (including all the missing 2048 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 2049 reg_state[CTX_LRI_HEADER_0] = 2050 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; 2051 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, 2052 RING_CONTEXT_CONTROL(engine), 2053 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2054 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2055 (HAS_RESOURCE_STREAMER(dev_priv) ? 2056 CTX_CTRL_RS_CTX_ENABLE : 0))); 2057 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 2058 0); 2059 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 2060 0); 2061 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, 2062 RING_START(engine->mmio_base), 0); 2063 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, 2064 RING_CTL(engine->mmio_base), 2065 RING_CTL_SIZE(ring->size) | RING_VALID); 2066 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, 2067 RING_BBADDR_UDW(engine->mmio_base), 0); 2068 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, 2069 RING_BBADDR(engine->mmio_base), 0); 2070 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, 2071 RING_BBSTATE(engine->mmio_base), 2072 RING_BB_PPGTT); 2073 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, 2074 RING_SBBADDR_UDW(engine->mmio_base), 0); 2075 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, 2076 RING_SBBADDR(engine->mmio_base), 0); 2077 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, 2078 RING_SBBSTATE(engine->mmio_base), 0); 2079 if (engine->id == RCS) { 2080 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, 2081 RING_BB_PER_CTX_PTR(engine->mmio_base), 0); 2082 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, 2083 RING_INDIRECT_CTX(engine->mmio_base), 0); 2084 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, 2085 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0); 2086 if (engine->wa_ctx.vma) { 2087 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 2088 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 2089 2090 reg_state[CTX_RCS_INDIRECT_CTX+1] = 2091 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) | 2092 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); 2093 2094 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 2095 intel_lr_indirect_ctx_offset(engine) << 6; 2096 2097 reg_state[CTX_BB_PER_CTX_PTR+1] = 2098 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | 2099 0x01; 2100 } 2101 } 2102 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; 2103 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, 2104 RING_CTX_TIMESTAMP(engine->mmio_base), 0); 2105 /* PDP values well be assigned later if needed */ 2106 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 2107 0); 2108 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 2109 0); 2110 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 2111 0); 2112 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 2113 0); 2114 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 2115 0); 2116 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 2117 0); 2118 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 2119 0); 2120 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 2121 0); 2122 2123 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 2124 /* 64b PPGTT (48bit canonical) 2125 * PDP0_DESCRIPTOR contains the base address to PML4 and 2126 * other PDP Descriptors are ignored. 2127 */ 2128 ASSIGN_CTX_PML4(ppgtt, reg_state); 2129 } else { 2130 /* 32b PPGTT 2131 * PDP*_DESCRIPTOR contains the base address of space supported. 2132 * With dynamic page allocation, PDPs may not be allocated at 2133 * this point. Point the unallocated PDPs to the scratch page 2134 */ 2135 execlists_update_context_pdps(ppgtt, reg_state); 2136 } 2137 2138 if (engine->id == RCS) { 2139 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2140 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2141 make_rpcs(dev_priv)); 2142 } 2143 } 2144 2145 static int 2146 populate_lr_context(struct i915_gem_context *ctx, 2147 struct drm_i915_gem_object *ctx_obj, 2148 struct intel_engine_cs *engine, 2149 struct intel_ring *ring) 2150 { 2151 void *vaddr; 2152 int ret; 2153 2154 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); 2155 if (ret) { 2156 DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); 2157 return ret; 2158 } 2159 2160 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); 2161 if (IS_ERR(vaddr)) { 2162 ret = PTR_ERR(vaddr); 2163 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); 2164 return ret; 2165 } 2166 ctx_obj->mm.dirty = true; 2167 2168 /* The second page of the context object contains some fields which must 2169 * be set up prior to the first execution. */ 2170 2171 execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, 2172 ctx, engine, ring); 2173 2174 i915_gem_object_unpin_map(ctx_obj); 2175 2176 return 0; 2177 } 2178 2179 /** 2180 * intel_lr_context_size() - return the size of the context for an engine 2181 * @engine: which engine to find the context size for 2182 * 2183 * Each engine may require a different amount of space for a context image, 2184 * so when allocating (or copying) an image, this function can be used to 2185 * find the right size for the specific engine. 2186 * 2187 * Return: size (in bytes) of an engine-specific context image 2188 * 2189 * Note: this size includes the HWSP, which is part of the context image 2190 * in LRC mode, but does not include the "shared data page" used with 2191 * GuC submission. The caller should account for this if using the GuC. 2192 */ 2193 uint32_t intel_lr_context_size(struct intel_engine_cs *engine) 2194 { 2195 int ret = 0; 2196 2197 WARN_ON(INTEL_GEN(engine->i915) < 8); 2198 2199 switch (engine->id) { 2200 case RCS: 2201 if (INTEL_GEN(engine->i915) >= 9) 2202 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2203 else 2204 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2205 break; 2206 case VCS: 2207 case BCS: 2208 case VECS: 2209 case VCS2: 2210 ret = GEN8_LR_CONTEXT_OTHER_SIZE; 2211 break; 2212 } 2213 2214 return ret; 2215 } 2216 2217 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, 2218 struct intel_engine_cs *engine) 2219 { 2220 struct drm_i915_gem_object *ctx_obj; 2221 struct intel_context *ce = &ctx->engine[engine->id]; 2222 struct i915_vma *vma; 2223 uint32_t context_size; 2224 struct intel_ring *ring; 2225 int ret; 2226 2227 WARN_ON(ce->state); 2228 2229 context_size = round_up(intel_lr_context_size(engine), 4096); 2230 2231 /* One extra page as the sharing data between driver and GuC */ 2232 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2233 2234 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); 2235 if (IS_ERR(ctx_obj)) { 2236 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2237 return PTR_ERR(ctx_obj); 2238 } 2239 2240 vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL); 2241 if (IS_ERR(vma)) { 2242 ret = PTR_ERR(vma); 2243 goto error_deref_obj; 2244 } 2245 2246 ring = intel_engine_create_ring(engine, ctx->ring_size); 2247 if (IS_ERR(ring)) { 2248 ret = PTR_ERR(ring); 2249 goto error_deref_obj; 2250 } 2251 2252 ret = populate_lr_context(ctx, ctx_obj, engine, ring); 2253 if (ret) { 2254 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2255 goto error_ring_free; 2256 } 2257 2258 ce->ring = ring; 2259 ce->state = vma; 2260 ce->initialised = engine->init_context == NULL; 2261 2262 return 0; 2263 2264 error_ring_free: 2265 intel_ring_free(ring); 2266 error_deref_obj: 2267 i915_gem_object_put(ctx_obj); 2268 return ret; 2269 } 2270 2271 void intel_lr_context_resume(struct drm_i915_private *dev_priv) 2272 { 2273 struct intel_engine_cs *engine; 2274 struct i915_gem_context *ctx; 2275 enum intel_engine_id id; 2276 2277 /* Because we emit WA_TAIL_DWORDS there may be a disparity 2278 * between our bookkeeping in ce->ring->head and ce->ring->tail and 2279 * that stored in context. As we only write new commands from 2280 * ce->ring->tail onwards, everything before that is junk. If the GPU 2281 * starts reading from its RING_HEAD from the context, it may try to 2282 * execute that junk and die. 2283 * 2284 * So to avoid that we reset the context images upon resume. For 2285 * simplicity, we just zero everything out. 2286 */ 2287 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2288 for_each_engine(engine, dev_priv, id) { 2289 struct intel_context *ce = &ctx->engine[engine->id]; 2290 u32 *reg; 2291 2292 if (!ce->state) 2293 continue; 2294 2295 reg = i915_gem_object_pin_map(ce->state->obj, 2296 I915_MAP_WB); 2297 if (WARN_ON(IS_ERR(reg))) 2298 continue; 2299 2300 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg); 2301 reg[CTX_RING_HEAD+1] = 0; 2302 reg[CTX_RING_TAIL+1] = 0; 2303 2304 ce->state->obj->mm.dirty = true; 2305 i915_gem_object_unpin_map(ce->state->obj); 2306 2307 ce->ring->head = ce->ring->tail = 0; 2308 ce->ring->last_retired_head = -1; 2309 intel_ring_update_space(ce->ring); 2310 } 2311 } 2312 } 2313