/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated.
 * The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use it. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one.
This 127 * request will then be resubmitted along with a new request for a different context, 128 * which will cause the hardware to continue executing the second request and queue 129 * the new request (the GPU detects the condition of a context getting preempted 130 * with the same context and optimizes the context switch flow by not doing 131 * preemption, but just sampling the new tail pointer). 132 * 133 */ 134 #include <linux/interrupt.h> 135 136 #include <drm/drmP.h> 137 #include <drm/i915_drm.h> 138 #include "i915_drv.h" 139 #include "intel_mocs.h" 140 141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) 144 145 #define RING_EXECLIST_QFULL (1 << 0x2) 146 #define RING_EXECLIST1_VALID (1 << 0x3) 147 #define RING_EXECLIST0_VALID (1 << 0x4) 148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) 149 #define RING_EXECLIST1_ACTIVE (1 << 0x11) 150 #define RING_EXECLIST0_ACTIVE (1 << 0x12) 151 152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) 153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) 154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) 155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) 156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4) 157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) 158 159 #define GEN8_CTX_STATUS_COMPLETED_MASK \ 160 (GEN8_CTX_STATUS_ACTIVE_IDLE | \ 161 GEN8_CTX_STATUS_PREEMPTED | \ 162 GEN8_CTX_STATUS_ELEMENT_SWITCH) 163 164 #define CTX_LRI_HEADER_0 0x01 165 #define CTX_CONTEXT_CONTROL 0x02 166 #define CTX_RING_HEAD 0x04 167 #define CTX_RING_TAIL 0x06 168 #define CTX_RING_BUFFER_START 0x08 169 #define CTX_RING_BUFFER_CONTROL 0x0a 170 #define CTX_BB_HEAD_U 0x0c 171 #define CTX_BB_HEAD_L 0x0e 172 #define CTX_BB_STATE 0x10 173 #define CTX_SECOND_BB_HEAD_U 0x12 174 #define CTX_SECOND_BB_HEAD_L 0x14 175 #define CTX_SECOND_BB_STATE 0x16 176 #define CTX_BB_PER_CTX_PTR 0x18 177 #define CTX_RCS_INDIRECT_CTX 0x1a 178 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c 179 #define CTX_LRI_HEADER_1 0x21 180 #define CTX_CTX_TIMESTAMP 0x22 181 #define CTX_PDP3_UDW 0x24 182 #define CTX_PDP3_LDW 0x26 183 #define CTX_PDP2_UDW 0x28 184 #define CTX_PDP2_LDW 0x2a 185 #define CTX_PDP1_UDW 0x2c 186 #define CTX_PDP1_LDW 0x2e 187 #define CTX_PDP0_UDW 0x30 188 #define CTX_PDP0_LDW 0x32 189 #define CTX_LRI_HEADER_2 0x41 190 #define CTX_R_PWR_CLK_STATE 0x42 191 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 192 193 #define GEN8_CTX_VALID (1<<0) 194 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1) 195 #define GEN8_CTX_FORCE_RESTORE (1<<2) 196 #define GEN8_CTX_L3LLC_COHERENT (1<<5) 197 #define GEN8_CTX_PRIVILEGE (1<<8) 198 199 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \ 200 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \ 201 (reg_state)[(pos)+1] = (val); \ 202 } while (0) 203 204 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ 205 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ 206 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ 207 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ 208 } while (0) 209 210 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ 211 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ 212 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ 213 } while (0) 214 215 enum { 216 FAULT_AND_HANG = 0, 217 FAULT_AND_HALT, /* Debug only */ 218 FAULT_AND_STREAM, 219 FAULT_AND_CONTINUE /* Unsupported */ 220 }; 221 #define GEN8_CTX_ID_SHIFT 32 222 #define GEN8_CTX_ID_WIDTH 21 223 #define 
GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 224 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 225 226 /* Typical size of the average request (2 pipecontrols and a MI_BB) */ 227 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 228 229 #define WA_TAIL_DWORDS 2 230 231 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, 232 struct intel_engine_cs *engine); 233 static int intel_lr_context_pin(struct i915_gem_context *ctx, 234 struct intel_engine_cs *engine); 235 static void execlists_init_reg_state(u32 *reg_state, 236 struct i915_gem_context *ctx, 237 struct intel_engine_cs *engine, 238 struct intel_ring *ring); 239 240 /** 241 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 242 * @dev_priv: i915 device private 243 * @enable_execlists: value of i915.enable_execlists module parameter. 244 * 245 * Only certain platforms support Execlists (the prerequisites being 246 * support for Logical Ring Contexts and Aliasing PPGTT or better). 247 * 248 * Return: 1 if Execlists is supported and has to be enabled. 249 */ 250 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) 251 { 252 /* On platforms with execlist available, vGPU will only 253 * support execlist mode, no ring buffer mode. 254 */ 255 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) 256 return 1; 257 258 if (INTEL_GEN(dev_priv) >= 9) 259 return 1; 260 261 if (enable_execlists == 0) 262 return 0; 263 264 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && 265 USES_PPGTT(dev_priv) && 266 i915.use_mmio_flip >= 0) 267 return 1; 268 269 return 0; 270 } 271 272 static void 273 logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 274 { 275 struct drm_i915_private *dev_priv = engine->i915; 276 277 engine->disable_lite_restore_wa = 278 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && 279 (engine->id == VCS || engine->id == VCS2); 280 281 engine->ctx_desc_template = GEN8_CTX_VALID; 282 if (IS_GEN8(dev_priv)) 283 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 284 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 285 286 /* TODO: WaDisableLiteRestore when we start using semaphore 287 * signalling between Command Streamers */ 288 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */ 289 290 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 291 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ 292 if (engine->disable_lite_restore_wa) 293 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; 294 } 295 296 /** 297 * intel_lr_context_descriptor_update() - calculate & cache the descriptor 298 * descriptor for a pinned context 299 * @ctx: Context to work on 300 * @engine: Engine the descriptor will be used with 301 * 302 * The context descriptor encodes various attributes of a context, 303 * including its GTT address and some flags. Because it's fairly 304 * expensive to calculate, we'll just do it once and cache the result, 305 * which remains valid until the context is unpinned. 
306 * 307 * This is what a descriptor looks like, from LSB to MSB:: 308 * 309 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 310 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 311 * bits 32-52: ctx ID, a globally unique tag 312 * bits 53-54: mbz, reserved for use by hardware 313 * bits 55-63: group ID, currently unused and set to 0 314 */ 315 static void 316 intel_lr_context_descriptor_update(struct i915_gem_context *ctx, 317 struct intel_engine_cs *engine) 318 { 319 struct intel_context *ce = &ctx->engine[engine->id]; 320 u64 desc; 321 322 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); 323 324 desc = ctx->desc_template; /* bits 3-4 */ 325 desc |= engine->ctx_desc_template; /* bits 0-11 */ 326 desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; 327 /* bits 12-31 */ 328 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 329 330 ce->lrc_desc = desc; 331 } 332 333 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, 334 struct intel_engine_cs *engine) 335 { 336 return ctx->engine[engine->id].lrc_desc; 337 } 338 339 static inline void 340 execlists_context_status_change(struct drm_i915_gem_request *rq, 341 unsigned long status) 342 { 343 /* 344 * Only used when GVT-g is enabled now. When GVT-g is disabled, 345 * The compiler should eliminate this function as dead-code. 346 */ 347 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 348 return; 349 350 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); 351 } 352 353 static void 354 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) 355 { 356 ASSIGN_CTX_PDP(ppgtt, reg_state, 3); 357 ASSIGN_CTX_PDP(ppgtt, reg_state, 2); 358 ASSIGN_CTX_PDP(ppgtt, reg_state, 1); 359 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 360 } 361 362 static u64 execlists_update_context(struct drm_i915_gem_request *rq) 363 { 364 struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; 365 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; 366 u32 *reg_state = ce->lrc_reg_state; 367 368 reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail); 369 370 /* True 32b PPGTT with dynamic page allocation: update PDP 371 * registers and point the unallocated PDPs to scratch page. 372 * PML4 is allocated during ppgtt init, so this is not needed 373 * in 48-bit mode. 374 */ 375 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 376 execlists_update_context_pdps(ppgtt, reg_state); 377 378 return ce->lrc_desc; 379 } 380 381 static void execlists_submit_ports(struct intel_engine_cs *engine) 382 { 383 struct drm_i915_private *dev_priv = engine->i915; 384 struct execlist_port *port = engine->execlist_port; 385 u32 __iomem *elsp = 386 dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine)); 387 u64 desc[2]; 388 389 if (!port[0].count) 390 execlists_context_status_change(port[0].request, 391 INTEL_CONTEXT_SCHEDULE_IN); 392 desc[0] = execlists_update_context(port[0].request); 393 engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */ 394 395 if (port[1].request) { 396 GEM_BUG_ON(port[1].count); 397 execlists_context_status_change(port[1].request, 398 INTEL_CONTEXT_SCHEDULE_IN); 399 desc[1] = execlists_update_context(port[1].request); 400 port[1].count = 1; 401 } else { 402 desc[1] = 0; 403 } 404 GEM_BUG_ON(desc[0] == desc[1]); 405 406 /* You must always write both descriptors in the order below. 
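	 * Each descriptor is written as two 32-bit MMIO writes to the same
	 * ELSP offset: the port 1 descriptor first (upper then lower dword),
	 * then the port 0 descriptor. The final write (lower dword of port 0)
	 * is what actually triggers the submission, as noted below.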
*/ 407 writel(upper_32_bits(desc[1]), elsp); 408 writel(lower_32_bits(desc[1]), elsp); 409 410 writel(upper_32_bits(desc[0]), elsp); 411 /* The context is automatically loaded after the following */ 412 writel(lower_32_bits(desc[0]), elsp); 413 } 414 415 static bool ctx_single_port_submission(const struct i915_gem_context *ctx) 416 { 417 return (IS_ENABLED(CONFIG_DRM_I915_GVT) && 418 ctx->execlists_force_single_submission); 419 } 420 421 static bool can_merge_ctx(const struct i915_gem_context *prev, 422 const struct i915_gem_context *next) 423 { 424 if (prev != next) 425 return false; 426 427 if (ctx_single_port_submission(prev)) 428 return false; 429 430 return true; 431 } 432 433 static void execlists_dequeue(struct intel_engine_cs *engine) 434 { 435 struct drm_i915_gem_request *cursor, *last; 436 struct execlist_port *port = engine->execlist_port; 437 bool submit = false; 438 439 last = port->request; 440 if (last) 441 /* WaIdleLiteRestore:bdw,skl 442 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL 443 * as we resubmit the request. See gen8_emit_request() 444 * for where we prepare the padding after the end of the 445 * request. 446 */ 447 last->tail = last->wa_tail; 448 449 GEM_BUG_ON(port[1].request); 450 451 /* Hardware submission is through 2 ports. Conceptually each port 452 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 453 * static for a context, and unique to each, so we only execute 454 * requests belonging to a single context from each ring. RING_HEAD 455 * is maintained by the CS in the context image, it marks the place 456 * where it got up to last time, and through RING_TAIL we tell the CS 457 * where we want to execute up to this time. 458 * 459 * In this list the requests are in order of execution. Consecutive 460 * requests from the same context are adjacent in the ringbuffer. We 461 * can combine these requests into a single RING_TAIL update: 462 * 463 * RING_HEAD...req1...req2 464 * ^- RING_TAIL 465 * since to execute req2 the CS must first execute req1. 466 * 467 * Our goal then is to point each port to the end of a consecutive 468 * sequence of requests as being the most optimal (fewest wake ups 469 * and context switches) submission. 470 */ 471 472 lockmgr(&engine->execlist_lock, LK_EXCLUSIVE); 473 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) { 474 /* Can we combine this request with the current port? It has to 475 * be the same context/ringbuffer and not have any exceptions 476 * (e.g. GVT saying never to combine contexts). 477 * 478 * If we can combine the requests, we can execute both by 479 * updating the RING_TAIL to point to the end of the second 480 * request, and so we never need to tell the hardware about 481 * the first. 482 */ 483 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) { 484 /* If we are on the second port and cannot combine 485 * this request with the last, then we are done. 486 */ 487 if (port != engine->execlist_port) 488 break; 489 490 /* If GVT overrides us we only ever submit port[0], 491 * leaving port[1] empty. Note that we also have 492 * to be careful that we don't queue the same 493 * context (even though a different request) to 494 * the second port. 
495 */ 496 if (ctx_single_port_submission(cursor->ctx)) 497 break; 498 499 GEM_BUG_ON(last->ctx == cursor->ctx); 500 501 i915_gem_request_assign(&port->request, last); 502 port++; 503 } 504 last = cursor; 505 submit = true; 506 } 507 if (submit) { 508 /* Decouple all the requests submitted from the queue */ 509 engine->execlist_queue.next = &cursor->execlist_link; 510 cursor->execlist_link.prev = &engine->execlist_queue; 511 512 i915_gem_request_assign(&port->request, last); 513 } 514 lockmgr(&engine->execlist_lock, LK_RELEASE); 515 516 if (submit) 517 execlists_submit_ports(engine); 518 } 519 520 static bool execlists_elsp_idle(struct intel_engine_cs *engine) 521 { 522 return !engine->execlist_port[0].request; 523 } 524 525 static bool execlists_elsp_ready(struct intel_engine_cs *engine) 526 { 527 int port; 528 529 port = 1; /* wait for a free slot */ 530 if (engine->disable_lite_restore_wa || engine->preempt_wa) 531 port = 0; /* wait for GPU to be idle before continuing */ 532 533 return !engine->execlist_port[port].request; 534 } 535 536 /* 537 * Check the unread Context Status Buffers and manage the submission of new 538 * contexts to the ELSP accordingly. 539 */ 540 static void intel_lrc_irq_handler(unsigned long data) 541 { 542 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 543 struct execlist_port *port = engine->execlist_port; 544 struct drm_i915_private *dev_priv = engine->i915; 545 546 intel_uncore_forcewake_get(dev_priv, engine->fw_domains); 547 548 if (!execlists_elsp_idle(engine)) { 549 u32 __iomem *csb_mmio = 550 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); 551 u32 __iomem *buf = 552 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); 553 unsigned int csb, head, tail; 554 555 csb = readl(csb_mmio); 556 head = GEN8_CSB_READ_PTR(csb); 557 tail = GEN8_CSB_WRITE_PTR(csb); 558 if (tail < head) 559 tail += GEN8_CSB_ENTRIES; 560 while (head < tail) { 561 unsigned int idx = ++head % GEN8_CSB_ENTRIES; 562 unsigned int status = readl(buf + 2 * idx); 563 564 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) 565 continue; 566 567 GEM_BUG_ON(port[0].count == 0); 568 if (--port[0].count == 0) { 569 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); 570 execlists_context_status_change(port[0].request, 571 INTEL_CONTEXT_SCHEDULE_OUT); 572 573 i915_gem_request_put(port[0].request); 574 port[0] = port[1]; 575 memset(&port[1], 0, sizeof(port[1])); 576 577 engine->preempt_wa = false; 578 } 579 580 GEM_BUG_ON(port[0].count == 0 && 581 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); 582 } 583 584 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 585 GEN8_CSB_WRITE_PTR(csb) << 8), 586 csb_mmio); 587 } 588 589 if (execlists_elsp_ready(engine)) 590 execlists_dequeue(engine); 591 592 intel_uncore_forcewake_put(dev_priv, engine->fw_domains); 593 } 594 595 static void execlists_submit_request(struct drm_i915_gem_request *request) 596 { 597 struct intel_engine_cs *engine = request->engine; 598 unsigned long flags; 599 600 spin_lock_irqsave(&engine->execlist_lock, flags); 601 602 list_add_tail(&request->execlist_link, &engine->execlist_queue); 603 if (execlists_elsp_idle(engine)) 604 tasklet_hi_schedule(&engine->irq_tasklet); 605 606 spin_unlock_irqrestore(&engine->execlist_lock, flags); 607 } 608 609 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 610 { 611 struct intel_engine_cs *engine = request->engine; 612 struct intel_context *ce = &request->ctx->engine[engine->id]; 613 int ret; 614 615 /* Flush enough space to 
reduce the likelihood of waiting after 616 * we start building the request - in which case we will just 617 * have to repeat work. 618 */ 619 request->reserved_space += EXECLISTS_REQUEST_SIZE; 620 621 if (!ce->state) { 622 ret = execlists_context_deferred_alloc(request->ctx, engine); 623 if (ret) 624 return ret; 625 } 626 627 request->ring = ce->ring; 628 629 ret = intel_lr_context_pin(request->ctx, engine); 630 if (ret) 631 return ret; 632 633 if (i915.enable_guc_submission) { 634 /* 635 * Check that the GuC has space for the request before 636 * going any further, as the i915_add_request() call 637 * later on mustn't fail ... 638 */ 639 ret = i915_guc_wq_reserve(request); 640 if (ret) 641 goto err_unpin; 642 } 643 644 ret = intel_ring_begin(request, 0); 645 if (ret) 646 goto err_unreserve; 647 648 if (!ce->initialised) { 649 ret = engine->init_context(request); 650 if (ret) 651 goto err_unreserve; 652 653 ce->initialised = true; 654 } 655 656 /* Note that after this point, we have committed to using 657 * this request as it is being used to both track the 658 * state of engine initialisation and liveness of the 659 * golden renderstate above. Think twice before you try 660 * to cancel/unwind this request now. 661 */ 662 663 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 664 return 0; 665 666 err_unreserve: 667 if (i915.enable_guc_submission) 668 i915_guc_wq_unreserve(request); 669 err_unpin: 670 intel_lr_context_unpin(request->ctx, engine); 671 return ret; 672 } 673 674 /* 675 * intel_logical_ring_advance() - advance the tail and prepare for submission 676 * @request: Request to advance the logical ringbuffer of. 677 * 678 * The tail is updated in our logical ringbuffer struct, not in the actual context. What 679 * really happens during submission is that the context and current tail will be placed 680 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that 681 * point, the tail *inside* the context is updated and the ELSP written to. 682 */ 683 static int 684 intel_logical_ring_advance(struct drm_i915_gem_request *request) 685 { 686 struct intel_ring *ring = request->ring; 687 struct intel_engine_cs *engine = request->engine; 688 689 intel_ring_advance(ring); 690 request->tail = ring->tail; 691 692 /* 693 * Here we add two extra NOOPs as padding to avoid 694 * lite restore of a context with HEAD==TAIL. 695 * 696 * Caller must reserve WA_TAIL_DWORDS for us! 697 */ 698 intel_ring_emit(ring, MI_NOOP); 699 intel_ring_emit(ring, MI_NOOP); 700 intel_ring_advance(ring); 701 request->wa_tail = ring->tail; 702 703 /* We keep the previous context alive until we retire the following 704 * request. This ensures that any the context object is still pinned 705 * for any residual writes the HW makes into it on the context switch 706 * into the next object following the breadcrumb. Otherwise, we may 707 * retire the context too early. 
708 */ 709 request->previous_context = engine->last_context; 710 engine->last_context = request->ctx; 711 return 0; 712 } 713 714 static int intel_lr_context_pin(struct i915_gem_context *ctx, 715 struct intel_engine_cs *engine) 716 { 717 struct intel_context *ce = &ctx->engine[engine->id]; 718 void *vaddr; 719 int ret; 720 721 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 722 723 if (ce->pin_count++) 724 return 0; 725 726 ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, 727 PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL); 728 if (ret) 729 goto err; 730 731 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); 732 if (IS_ERR(vaddr)) { 733 ret = PTR_ERR(vaddr); 734 goto unpin_vma; 735 } 736 737 ret = intel_ring_pin(ce->ring); 738 if (ret) 739 goto unpin_map; 740 741 intel_lr_context_descriptor_update(ctx, engine); 742 743 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 744 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = 745 i915_ggtt_offset(ce->ring->vma); 746 747 ce->state->obj->dirty = true; 748 749 /* Invalidate GuC TLB. */ 750 if (i915.enable_guc_submission) { 751 struct drm_i915_private *dev_priv = ctx->i915; 752 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 753 } 754 755 i915_gem_context_get(ctx); 756 return 0; 757 758 unpin_map: 759 i915_gem_object_unpin_map(ce->state->obj); 760 unpin_vma: 761 __i915_vma_unpin(ce->state); 762 err: 763 ce->pin_count = 0; 764 return ret; 765 } 766 767 void intel_lr_context_unpin(struct i915_gem_context *ctx, 768 struct intel_engine_cs *engine) 769 { 770 struct intel_context *ce = &ctx->engine[engine->id]; 771 772 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 773 GEM_BUG_ON(ce->pin_count == 0); 774 775 if (--ce->pin_count) 776 return; 777 778 intel_ring_unpin(ce->ring); 779 780 i915_gem_object_unpin_map(ce->state->obj); 781 i915_vma_unpin(ce->state); 782 783 i915_gem_context_put(ctx); 784 } 785 786 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 787 { 788 int ret, i; 789 struct intel_ring *ring = req->ring; 790 struct i915_workarounds *w = &req->i915->workarounds; 791 792 if (w->count == 0) 793 return 0; 794 795 ret = req->engine->emit_flush(req, EMIT_BARRIER); 796 if (ret) 797 return ret; 798 799 ret = intel_ring_begin(req, w->count * 2 + 2); 800 if (ret) 801 return ret; 802 803 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); 804 for (i = 0; i < w->count; i++) { 805 intel_ring_emit_reg(ring, w->reg[i].addr); 806 intel_ring_emit(ring, w->reg[i].value); 807 } 808 intel_ring_emit(ring, MI_NOOP); 809 810 intel_ring_advance(ring); 811 812 ret = req->engine->emit_flush(req, EMIT_BARRIER); 813 if (ret) 814 return ret; 815 816 return 0; 817 } 818 819 #define wa_ctx_emit(batch, index, cmd) \ 820 do { \ 821 int __index = (index)++; \ 822 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \ 823 return -ENOSPC; \ 824 } \ 825 batch[__index] = (cmd); \ 826 } while (0) 827 828 #define wa_ctx_emit_reg(batch, index, reg) \ 829 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg)) 830 831 /* 832 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after 833 * PIPE_CONTROL instruction. This is required for the flush to happen correctly 834 * but there is a slight complication as this is applied in WA batch where the 835 * values are only initialized once so we cannot take register value at the 836 * beginning and reuse it further; hence we save its value to memory, upload a 837 * constant value with bit21 set and then we restore it back with the saved value. 
838 * To simplify the WA, a constant value is formed by using the default value 839 * of this register. This shouldn't be a problem because we are only modifying 840 * it for a short period and this batch in non-premptible. We can ofcourse 841 * use additional instructions that read the actual value of the register 842 * at that time and set our bit of interest but it makes the WA complicated. 843 * 844 * This WA is also required for Gen9 so extracting as a function avoids 845 * code duplication. 846 */ 847 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, 848 uint32_t *batch, 849 uint32_t index) 850 { 851 struct drm_i915_private *dev_priv = engine->i915; 852 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 853 854 /* 855 * WaDisableLSQCROPERFforOCL:kbl 856 * This WA is implemented in skl_init_clock_gating() but since 857 * this batch updates GEN8_L3SQCREG4 with default value we need to 858 * set this bit here to retain the WA during flush. 859 */ 860 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 861 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 862 863 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 864 MI_SRM_LRM_GLOBAL_GTT)); 865 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 866 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); 867 wa_ctx_emit(batch, index, 0); 868 869 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 870 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 871 wa_ctx_emit(batch, index, l3sqc4_flush); 872 873 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 874 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL | 875 PIPE_CONTROL_DC_FLUSH_ENABLE)); 876 wa_ctx_emit(batch, index, 0); 877 wa_ctx_emit(batch, index, 0); 878 wa_ctx_emit(batch, index, 0); 879 wa_ctx_emit(batch, index, 0); 880 881 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 882 MI_SRM_LRM_GLOBAL_GTT)); 883 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 884 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); 885 wa_ctx_emit(batch, index, 0); 886 887 return index; 888 } 889 890 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx, 891 uint32_t offset, 892 uint32_t start_alignment) 893 { 894 return wa_ctx->offset = ALIGN(offset, start_alignment); 895 } 896 897 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, 898 uint32_t offset, 899 uint32_t size_alignment) 900 { 901 wa_ctx->size = offset - wa_ctx->offset; 902 903 WARN(wa_ctx->size % size_alignment, 904 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n", 905 wa_ctx->size, size_alignment); 906 return 0; 907 } 908 909 /* 910 * Typically we only have one indirect_ctx and per_ctx batch buffer which are 911 * initialized at the beginning and shared across all contexts but this field 912 * helps us to have multiple batches at different offsets and select them based 913 * on a criteria. At the moment this batch always start at the beginning of the page 914 * and at this point we don't have multiple wa_ctx batch buffers. 915 * 916 * The number of WA applied are not known at the beginning; we use this field 917 * to return the no of DWORDS written. 918 * 919 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END 920 * so it adds NOOPs as padding to make it cacheline aligned. 921 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together 922 * makes a complete batch buffer. 
923 */ 924 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, 925 struct i915_wa_ctx_bb *wa_ctx, 926 uint32_t *batch, 927 uint32_t *offset) 928 { 929 uint32_t scratch_addr; 930 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 931 932 /* WaDisableCtxRestoreArbitration:bdw,chv */ 933 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 934 935 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 936 if (IS_BROADWELL(engine->i915)) { 937 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 938 if (rc < 0) 939 return rc; 940 index = rc; 941 } 942 943 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 944 /* Actual scratch location is at 128 bytes offset */ 945 scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 946 947 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 948 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 949 PIPE_CONTROL_GLOBAL_GTT_IVB | 950 PIPE_CONTROL_CS_STALL | 951 PIPE_CONTROL_QW_WRITE)); 952 wa_ctx_emit(batch, index, scratch_addr); 953 wa_ctx_emit(batch, index, 0); 954 wa_ctx_emit(batch, index, 0); 955 wa_ctx_emit(batch, index, 0); 956 957 /* Pad to end of cacheline */ 958 while (index % CACHELINE_DWORDS) 959 wa_ctx_emit(batch, index, MI_NOOP); 960 961 /* 962 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 963 * execution depends on the length specified in terms of cache lines 964 * in the register CTX_RCS_INDIRECT_CTX 965 */ 966 967 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 968 } 969 970 /* 971 * This batch is started immediately after indirect_ctx batch. Since we ensure 972 * that indirect_ctx ends on a cacheline this batch is aligned automatically. 973 * 974 * The number of DWORDS written are returned using this field. 975 * 976 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding 977 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 
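 * (For gen8 this per-ctx batch only re-enables the MI arbitration that the
 * indirect_ctx batch disabled, and then terminates with MI_BATCH_BUFFER_END.)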
978 */ 979 static int gen8_init_perctx_bb(struct intel_engine_cs *engine, 980 struct i915_wa_ctx_bb *wa_ctx, 981 uint32_t *batch, 982 uint32_t *offset) 983 { 984 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 985 986 /* WaDisableCtxRestoreArbitration:bdw,chv */ 987 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 988 989 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 990 991 return wa_ctx_end(wa_ctx, *offset = index, 1); 992 } 993 994 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, 995 struct i915_wa_ctx_bb *wa_ctx, 996 uint32_t *batch, 997 uint32_t *offset) 998 { 999 int ret; 1000 struct drm_i915_private *dev_priv = engine->i915; 1001 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1002 1003 /* WaDisableCtxRestoreArbitration:bxt */ 1004 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 1005 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1006 1007 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1008 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1009 if (ret < 0) 1010 return ret; 1011 index = ret; 1012 1013 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */ 1014 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1015 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2); 1016 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE( 1017 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE)); 1018 wa_ctx_emit(batch, index, MI_NOOP); 1019 1020 /* WaClearSlmSpaceAtContextSwitch:kbl */ 1021 /* Actual scratch location is at 128 bytes offset */ 1022 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { 1023 u32 scratch_addr = 1024 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 1025 1026 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1027 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1028 PIPE_CONTROL_GLOBAL_GTT_IVB | 1029 PIPE_CONTROL_CS_STALL | 1030 PIPE_CONTROL_QW_WRITE)); 1031 wa_ctx_emit(batch, index, scratch_addr); 1032 wa_ctx_emit(batch, index, 0); 1033 wa_ctx_emit(batch, index, 0); 1034 wa_ctx_emit(batch, index, 0); 1035 } 1036 1037 /* WaMediaPoolStateCmdInWABB:bxt */ 1038 if (HAS_POOLED_EU(engine->i915)) { 1039 /* 1040 * EU pool configuration is setup along with golden context 1041 * during context initialization. This value depends on 1042 * device type (2x6 or 3x6) and needs to be updated based 1043 * on which subslice is disabled especially for 2x6 1044 * devices, however it is safe to load default 1045 * configuration of 3x6 device instead of masking off 1046 * corresponding bits because HW ignores bits of a disabled 1047 * subslice and drops down to appropriate config. Please 1048 * see render_state_setup() in i915_gem_render_state.c for 1049 * possible configurations, to avoid duplication they are 1050 * not shown here again. 
1051 */ 1052 u32 eu_pool_config = 0x00777000; 1053 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE); 1054 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE); 1055 wa_ctx_emit(batch, index, eu_pool_config); 1056 wa_ctx_emit(batch, index, 0); 1057 wa_ctx_emit(batch, index, 0); 1058 wa_ctx_emit(batch, index, 0); 1059 } 1060 1061 /* Pad to end of cacheline */ 1062 while (index % CACHELINE_DWORDS) 1063 wa_ctx_emit(batch, index, MI_NOOP); 1064 1065 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1066 } 1067 1068 static int gen9_init_perctx_bb(struct intel_engine_cs *engine, 1069 struct i915_wa_ctx_bb *wa_ctx, 1070 uint32_t *batch, 1071 uint32_t *offset) 1072 { 1073 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1074 1075 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */ 1076 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { 1077 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1078 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1079 wa_ctx_emit(batch, index, 1080 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); 1081 wa_ctx_emit(batch, index, MI_NOOP); 1082 } 1083 1084 /* WaClearTdlStateAckDirtyBits:bxt */ 1085 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { 1086 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1087 1088 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1089 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1090 1091 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1); 1092 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1093 1094 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2); 1095 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1096 1097 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2); 1098 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */ 1099 wa_ctx_emit(batch, index, 0x0); 1100 wa_ctx_emit(batch, index, MI_NOOP); 1101 } 1102 1103 /* WaDisableCtxRestoreArbitration:bxt */ 1104 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) 1105 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1106 1107 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1108 1109 return wa_ctx_end(wa_ctx, *offset = index, 1); 1110 } 1111 1112 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) 1113 { 1114 struct drm_i915_gem_object *obj; 1115 struct i915_vma *vma; 1116 int err; 1117 1118 obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); 1119 if (IS_ERR(obj)) 1120 return PTR_ERR(obj); 1121 1122 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); 1123 if (IS_ERR(vma)) { 1124 err = PTR_ERR(vma); 1125 goto err; 1126 } 1127 1128 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); 1129 if (err) 1130 goto err; 1131 1132 engine->wa_ctx.vma = vma; 1133 return 0; 1134 1135 err: 1136 i915_gem_object_put(obj); 1137 return err; 1138 } 1139 1140 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) 1141 { 1142 i915_vma_unpin_and_release(&engine->wa_ctx.vma); 1143 } 1144 1145 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 1146 { 1147 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 1148 uint32_t *batch; 1149 uint32_t offset; 1150 struct page *page; 1151 int ret; 1152 1153 WARN_ON(engine->id != RCS); 1154 1155 /* update this when WA for higher Gen are added */ 1156 if (INTEL_GEN(engine->i915) > 9) { 1157 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1158 INTEL_GEN(engine->i915)); 1159 return 0; 1160 } 1161 1162 /* some WA 
perform writes to scratch page, ensure it is valid */ 1163 if (!engine->scratch) { 1164 DRM_ERROR("scratch page not allocated for %s\n", engine->name); 1165 return -EINVAL; 1166 } 1167 1168 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE); 1169 if (ret) { 1170 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1171 return ret; 1172 } 1173 1174 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); 1175 batch = kmap_atomic(page); 1176 offset = 0; 1177 1178 if (IS_GEN8(engine->i915)) { 1179 ret = gen8_init_indirectctx_bb(engine, 1180 &wa_ctx->indirect_ctx, 1181 batch, 1182 &offset); 1183 if (ret) 1184 goto out; 1185 1186 ret = gen8_init_perctx_bb(engine, 1187 &wa_ctx->per_ctx, 1188 batch, 1189 &offset); 1190 if (ret) 1191 goto out; 1192 } else if (IS_GEN9(engine->i915)) { 1193 ret = gen9_init_indirectctx_bb(engine, 1194 &wa_ctx->indirect_ctx, 1195 batch, 1196 &offset); 1197 if (ret) 1198 goto out; 1199 1200 ret = gen9_init_perctx_bb(engine, 1201 &wa_ctx->per_ctx, 1202 batch, 1203 &offset); 1204 if (ret) 1205 goto out; 1206 } 1207 1208 out: 1209 kunmap_atomic(batch); 1210 if (ret) 1211 lrc_destroy_wa_ctx_obj(engine); 1212 1213 return ret; 1214 } 1215 1216 static void lrc_init_hws(struct intel_engine_cs *engine) 1217 { 1218 struct drm_i915_private *dev_priv = engine->i915; 1219 1220 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1221 engine->status_page.ggtt_offset); 1222 POSTING_READ(RING_HWS_PGA(engine->mmio_base)); 1223 } 1224 1225 static int gen8_init_common_ring(struct intel_engine_cs *engine) 1226 { 1227 struct drm_i915_private *dev_priv = engine->i915; 1228 int ret; 1229 1230 ret = intel_mocs_init_engine(engine); 1231 if (ret) 1232 return ret; 1233 1234 lrc_init_hws(engine); 1235 1236 intel_engine_reset_breadcrumbs(engine); 1237 1238 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); 1239 1240 I915_WRITE(RING_MODE_GEN7(engine), 1241 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1242 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1243 1244 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); 1245 1246 intel_engine_init_hangcheck(engine); 1247 1248 /* After a GPU reset, we may have requests to replay */ 1249 if (!execlists_elsp_idle(engine)) { 1250 engine->execlist_port[0].count = 0; 1251 engine->execlist_port[1].count = 0; 1252 execlists_submit_ports(engine); 1253 } 1254 1255 return 0; 1256 } 1257 1258 static int gen8_init_render_ring(struct intel_engine_cs *engine) 1259 { 1260 struct drm_i915_private *dev_priv = engine->i915; 1261 int ret; 1262 1263 ret = gen8_init_common_ring(engine); 1264 if (ret) 1265 return ret; 1266 1267 /* We need to disable the AsyncFlip performance optimisations in order 1268 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 1269 * programmed to '1' on all products. 
1270 * 1271 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 1272 */ 1273 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1274 1275 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1276 1277 return init_workarounds_ring(engine); 1278 } 1279 1280 static int gen9_init_render_ring(struct intel_engine_cs *engine) 1281 { 1282 int ret; 1283 1284 ret = gen8_init_common_ring(engine); 1285 if (ret) 1286 return ret; 1287 1288 return init_workarounds_ring(engine); 1289 } 1290 1291 static void reset_common_ring(struct intel_engine_cs *engine, 1292 struct drm_i915_gem_request *request) 1293 { 1294 struct drm_i915_private *dev_priv = engine->i915; 1295 struct execlist_port *port = engine->execlist_port; 1296 struct intel_context *ce = &request->ctx->engine[engine->id]; 1297 1298 /* We want a simple context + ring to execute the breadcrumb update. 1299 * We cannot rely on the context being intact across the GPU hang, 1300 * so clear it and rebuild just what we need for the breadcrumb. 1301 * All pending requests for this context will be zapped, and any 1302 * future request will be after userspace has had the opportunity 1303 * to recreate its own state. 1304 */ 1305 execlists_init_reg_state(ce->lrc_reg_state, 1306 request->ctx, engine, ce->ring); 1307 1308 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ 1309 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = 1310 i915_ggtt_offset(ce->ring->vma); 1311 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix; 1312 1313 request->ring->head = request->postfix; 1314 request->ring->last_retired_head = -1; 1315 intel_ring_update_space(request->ring); 1316 1317 if (i915.enable_guc_submission) 1318 return; 1319 1320 /* Catch up with any missed context-switch interrupts */ 1321 I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0)); 1322 if (request->ctx != port[0].request->ctx) { 1323 i915_gem_request_put(port[0].request); 1324 port[0] = port[1]; 1325 memset(&port[1], 0, sizeof(port[1])); 1326 } 1327 1328 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1329 1330 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1331 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1332 } 1333 1334 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1335 { 1336 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1337 struct intel_ring *ring = req->ring; 1338 struct intel_engine_cs *engine = req->engine; 1339 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1340 int i, ret; 1341 1342 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1343 if (ret) 1344 return ret; 1345 1346 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds)); 1347 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { 1348 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1349 1350 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i)); 1351 intel_ring_emit(ring, upper_32_bits(pd_daddr)); 1352 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i)); 1353 intel_ring_emit(ring, lower_32_bits(pd_daddr)); 1354 } 1355 1356 intel_ring_emit(ring, MI_NOOP); 1357 intel_ring_advance(ring); 1358 1359 return 0; 1360 } 1361 1362 static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1363 u64 offset, u32 len, 1364 unsigned int dispatch_flags) 1365 { 1366 struct intel_ring *ring = req->ring; 1367 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); 1368 int ret; 1369 1370 /* Don't rely in hw updating PDPs, specially in lite-restore. 
1371 * Ideally, we should set Force PD Restore in ctx descriptor, 1372 * but we can't. Force Restore would be a second option, but 1373 * it is unsafe in case of lite-restore (because the ctx is 1374 * not idle). PML4 is allocated during ppgtt init so this is 1375 * not needed in 48-bit.*/ 1376 if (req->ctx->ppgtt && 1377 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1378 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1379 !intel_vgpu_active(req->i915)) { 1380 ret = intel_logical_ring_emit_pdps(req); 1381 if (ret) 1382 return ret; 1383 } 1384 1385 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1386 } 1387 1388 ret = intel_ring_begin(req, 4); 1389 if (ret) 1390 return ret; 1391 1392 /* FIXME(BDW): Address space and security selectors. */ 1393 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | 1394 (ppgtt<<8) | 1395 (dispatch_flags & I915_DISPATCH_RS ? 1396 MI_BATCH_RESOURCE_STREAMER : 0)); 1397 intel_ring_emit(ring, lower_32_bits(offset)); 1398 intel_ring_emit(ring, upper_32_bits(offset)); 1399 intel_ring_emit(ring, MI_NOOP); 1400 intel_ring_advance(ring); 1401 1402 return 0; 1403 } 1404 1405 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) 1406 { 1407 struct drm_i915_private *dev_priv = engine->i915; 1408 I915_WRITE_IMR(engine, 1409 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 1410 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1411 } 1412 1413 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) 1414 { 1415 struct drm_i915_private *dev_priv = engine->i915; 1416 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1417 } 1418 1419 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode) 1420 { 1421 struct intel_ring *ring = request->ring; 1422 u32 cmd; 1423 int ret; 1424 1425 ret = intel_ring_begin(request, 4); 1426 if (ret) 1427 return ret; 1428 1429 cmd = MI_FLUSH_DW + 1; 1430 1431 /* We always require a command barrier so that subsequent 1432 * commands, such as breadcrumb interrupts, are strictly ordered 1433 * wrt the contents of the write cache being flushed to memory 1434 * (and thus being coherent from the CPU). 
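	 * In practice the barrier is provided by the flush's post-sync
	 * operation: a dummy dword store into the per-engine HWS scratch
	 * slot in the GGTT, emitted below.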
1435 */ 1436 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1437 1438 if (mode & EMIT_INVALIDATE) { 1439 cmd |= MI_INVALIDATE_TLB; 1440 if (request->engine->id == VCS) 1441 cmd |= MI_INVALIDATE_BSD; 1442 } 1443 1444 intel_ring_emit(ring, cmd); 1445 intel_ring_emit(ring, 1446 I915_GEM_HWS_SCRATCH_ADDR | 1447 MI_FLUSH_DW_USE_GTT); 1448 intel_ring_emit(ring, 0); /* upper addr */ 1449 intel_ring_emit(ring, 0); /* value */ 1450 intel_ring_advance(ring); 1451 1452 return 0; 1453 } 1454 1455 static int gen8_emit_flush_render(struct drm_i915_gem_request *request, 1456 u32 mode) 1457 { 1458 struct intel_ring *ring = request->ring; 1459 struct intel_engine_cs *engine = request->engine; 1460 u32 scratch_addr = 1461 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; 1462 bool vf_flush_wa = false, dc_flush_wa = false; 1463 u32 flags = 0; 1464 int ret; 1465 int len; 1466 1467 flags |= PIPE_CONTROL_CS_STALL; 1468 1469 if (mode & EMIT_FLUSH) { 1470 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1471 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1472 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 1473 flags |= PIPE_CONTROL_FLUSH_ENABLE; 1474 } 1475 1476 if (mode & EMIT_INVALIDATE) { 1477 flags |= PIPE_CONTROL_TLB_INVALIDATE; 1478 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 1479 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 1480 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 1481 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 1482 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 1483 flags |= PIPE_CONTROL_QW_WRITE; 1484 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 1485 1486 /* 1487 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1488 * pipe control. 1489 */ 1490 if (IS_GEN9(request->i915)) 1491 vf_flush_wa = true; 1492 1493 /* WaForGAMHang:kbl */ 1494 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) 1495 dc_flush_wa = true; 1496 } 1497 1498 len = 6; 1499 1500 if (vf_flush_wa) 1501 len += 6; 1502 1503 if (dc_flush_wa) 1504 len += 12; 1505 1506 ret = intel_ring_begin(request, len); 1507 if (ret) 1508 return ret; 1509 1510 if (vf_flush_wa) { 1511 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1512 intel_ring_emit(ring, 0); 1513 intel_ring_emit(ring, 0); 1514 intel_ring_emit(ring, 0); 1515 intel_ring_emit(ring, 0); 1516 intel_ring_emit(ring, 0); 1517 } 1518 1519 if (dc_flush_wa) { 1520 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1521 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE); 1522 intel_ring_emit(ring, 0); 1523 intel_ring_emit(ring, 0); 1524 intel_ring_emit(ring, 0); 1525 intel_ring_emit(ring, 0); 1526 } 1527 1528 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1529 intel_ring_emit(ring, flags); 1530 intel_ring_emit(ring, scratch_addr); 1531 intel_ring_emit(ring, 0); 1532 intel_ring_emit(ring, 0); 1533 intel_ring_emit(ring, 0); 1534 1535 if (dc_flush_wa) { 1536 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1537 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL); 1538 intel_ring_emit(ring, 0); 1539 intel_ring_emit(ring, 0); 1540 intel_ring_emit(ring, 0); 1541 intel_ring_emit(ring, 0); 1542 } 1543 1544 intel_ring_advance(ring); 1545 1546 return 0; 1547 } 1548 1549 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) 1550 { 1551 /* 1552 * On BXT A steppings there is a HW coherency issue whereby the 1553 * MI_STORE_DATA_IMM storing the completed request's seqno 1554 * occasionally doesn't invalidate the CPU cache. Work around this by 1555 * clflushing the corresponding cacheline whenever the caller wants 1556 * the coherency to be guaranteed. 
Note that this cacheline is known 1557 * to be clean at this point, since we only write it in 1558 * bxt_a_set_seqno(), where we also do a clflush after the write. So 1559 * this clflush in practice becomes an invalidate operation. 1560 */ 1561 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1562 } 1563 1564 /* 1565 * Reserve space for 2 NOOPs at the end of each request to be 1566 * used as a workaround for not being allowed to do lite 1567 * restore with HEAD==TAIL (WaIdleLiteRestore). 1568 */ 1569 1570 static int gen8_emit_request(struct drm_i915_gem_request *request) 1571 { 1572 struct intel_ring *ring = request->ring; 1573 int ret; 1574 1575 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); 1576 if (ret) 1577 return ret; 1578 1579 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 1580 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); 1581 1582 intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1583 intel_ring_emit(ring, 1584 intel_hws_seqno_address(request->engine) | 1585 MI_FLUSH_DW_USE_GTT); 1586 intel_ring_emit(ring, 0); 1587 intel_ring_emit(ring, request->fence.seqno); 1588 intel_ring_emit(ring, MI_USER_INTERRUPT); 1589 intel_ring_emit(ring, MI_NOOP); 1590 return intel_logical_ring_advance(request); 1591 } 1592 1593 static int gen8_emit_request_render(struct drm_i915_gem_request *request) 1594 { 1595 struct intel_ring *ring = request->ring; 1596 int ret; 1597 1598 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); 1599 if (ret) 1600 return ret; 1601 1602 /* We're using qword write, seqno should be aligned to 8 bytes. */ 1603 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 1604 1605 /* w/a for post sync ops following a GPGPU operation we 1606 * need a prior CS_STALL, which is emitted by the flush 1607 * following the batch. 1608 */ 1609 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 1610 intel_ring_emit(ring, 1611 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1612 PIPE_CONTROL_CS_STALL | 1613 PIPE_CONTROL_QW_WRITE)); 1614 intel_ring_emit(ring, intel_hws_seqno_address(request->engine)); 1615 intel_ring_emit(ring, 0); 1616 intel_ring_emit(ring, i915_gem_request_get_seqno(request)); 1617 /* We're thrashing one dword of HWS. */ 1618 intel_ring_emit(ring, 0); 1619 intel_ring_emit(ring, MI_USER_INTERRUPT); 1620 intel_ring_emit(ring, MI_NOOP); 1621 return intel_logical_ring_advance(request); 1622 } 1623 1624 static int gen8_init_rcs_context(struct drm_i915_gem_request *req) 1625 { 1626 int ret; 1627 1628 ret = intel_logical_ring_workarounds_emit(req); 1629 if (ret) 1630 return ret; 1631 1632 ret = intel_rcs_context_init_mocs(req); 1633 /* 1634 * Failing to program the MOCS is non-fatal.The system will not 1635 * run at peak performance. So generate an error and carry on. 1636 */ 1637 if (ret) 1638 DRM_ERROR("MOCS failed to program: expect performance issues.\n"); 1639 1640 return i915_gem_render_state_init(req); 1641 } 1642 1643 /** 1644 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer 1645 * @engine: Engine Command Streamer. 1646 */ 1647 void intel_logical_ring_cleanup(struct intel_engine_cs *engine) 1648 { 1649 struct drm_i915_private *dev_priv; 1650 1651 /* 1652 * Tasklet cannot be active at this point due intel_mark_active/idle 1653 * so this is just for documentation. 
1654 */ 1655 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1656 tasklet_kill(&engine->irq_tasklet); 1657 1658 dev_priv = engine->i915; 1659 1660 if (engine->buffer) { 1661 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); 1662 } 1663 1664 if (engine->cleanup) 1665 engine->cleanup(engine); 1666 1667 intel_engine_cleanup_common(engine); 1668 1669 if (engine->status_page.vma) { 1670 i915_gem_object_unpin_map(engine->status_page.vma->obj); 1671 engine->status_page.vma = NULL; 1672 } 1673 intel_lr_context_unpin(dev_priv->kernel_context, engine); 1674 1675 lrc_destroy_wa_ctx_obj(engine); 1676 engine->i915 = NULL; 1677 dev_priv->engine[engine->id] = NULL; 1678 kfree(engine); 1679 } 1680 1681 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv) 1682 { 1683 struct intel_engine_cs *engine; 1684 enum intel_engine_id id; 1685 1686 for_each_engine(engine, dev_priv, id) 1687 engine->submit_request = execlists_submit_request; 1688 } 1689 1690 static void 1691 logical_ring_default_vfuncs(struct intel_engine_cs *engine) 1692 { 1693 /* Default vfuncs which can be overriden by each engine. */ 1694 engine->init_hw = gen8_init_common_ring; 1695 engine->reset_hw = reset_common_ring; 1696 engine->emit_flush = gen8_emit_flush; 1697 engine->emit_request = gen8_emit_request; 1698 engine->submit_request = execlists_submit_request; 1699 1700 engine->irq_enable = gen8_logical_ring_enable_irq; 1701 engine->irq_disable = gen8_logical_ring_disable_irq; 1702 engine->emit_bb_start = gen8_emit_bb_start; 1703 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) 1704 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1705 } 1706 1707 static inline void 1708 logical_ring_default_irqs(struct intel_engine_cs *engine) 1709 { 1710 unsigned shift = engine->irq_shift; 1711 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 1712 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1713 } 1714 1715 static int 1716 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) 1717 { 1718 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; 1719 void *hws; 1720 1721 /* The HWSP is part of the default context object in LRC mode. */ 1722 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); 1723 if (IS_ERR(hws)) 1724 return PTR_ERR(hws); 1725 1726 engine->status_page.page_addr = hws + hws_offset; 1727 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; 1728 engine->status_page.vma = vma; 1729 1730 return 0; 1731 } 1732 1733 static void 1734 logical_ring_setup(struct intel_engine_cs *engine) 1735 { 1736 struct drm_i915_private *dev_priv = engine->i915; 1737 enum forcewake_domains fw_domains; 1738 1739 intel_engine_setup_common(engine); 1740 1741 /* Intentionally left blank. 
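	 * In LRC mode the engine owns no ringbuffer of its own: ringbuffers
	 * are allocated per-context, so engine->buffer stays NULL.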
	engine->buffer = NULL;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
						    RING_ELSP(engine),
						    FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_PTR(engine),
						     FW_REG_READ | FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_BUF_BASE(engine),
						     FW_REG_READ);

	engine->fw_domains = fw_domains;

	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler, (unsigned long)engine);

	logical_ring_init_platform_invariants(engine);
	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine);
}

static int
logical_ring_init(struct intel_engine_cs *engine)
{
	struct i915_gem_context *dctx = engine->i915->kernel_context;
	int ret;

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	ret = execlists_context_deferred_alloc(dctx, engine);
	if (ret)
		goto error;

	/* As this is the default context, always pin it */
	ret = intel_lr_context_pin(dctx, engine);
	if (ret) {
		DRM_ERROR("Failed to pin context for %s: %d\n",
			  engine->name, ret);
		goto error;
	}

	/* And setup the hardware status page. */
	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
	if (ret) {
		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
		goto error;
	}

	return 0;

error:
	intel_logical_ring_cleanup(engine);
	return ret;
}

int logical_render_ring_init(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	logical_ring_setup(engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	/* Override some for render ring. */
	if (INTEL_GEN(dev_priv) >= 9)
		engine->init_hw = gen9_init_render_ring;
	else
		engine->init_hw = gen8_init_render_ring;
	engine->init_context = gen8_init_rcs_context;
	engine->emit_flush = gen8_emit_flush_render;
	engine->emit_request = gen8_emit_request_render;

	ret = intel_engine_create_scratch(engine, 4096);
	if (ret)
		return ret;

	ret = intel_init_workaround_bb(engine);
	if (ret) {
		/*
		 * We continue even if we fail to initialize the WA batch
		 * because we only expect rare glitches but nothing critical
		 * enough to prevent us from using the GPU.
		 */
		DRM_ERROR("WA batch buffer initialization failed: %d\n",
			  ret);
	}

	ret = logical_ring_init(engine);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
	logical_ring_setup(engine);

	return logical_ring_init(engine);
}

static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
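	 *
	 * The value built up here is later written into the context image's
	 * R_PWR_CLK_STATE register by execlists_init_reg_state().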
1868 */ 1869 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { 1870 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 1871 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) << 1872 GEN8_RPCS_S_CNT_SHIFT; 1873 rpcs |= GEN8_RPCS_ENABLE; 1874 } 1875 1876 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) { 1877 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 1878 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) << 1879 GEN8_RPCS_SS_CNT_SHIFT; 1880 rpcs |= GEN8_RPCS_ENABLE; 1881 } 1882 1883 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { 1884 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 1885 GEN8_RPCS_EU_MIN_SHIFT; 1886 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 1887 GEN8_RPCS_EU_MAX_SHIFT; 1888 rpcs |= GEN8_RPCS_ENABLE; 1889 } 1890 1891 return rpcs; 1892 } 1893 1894 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) 1895 { 1896 u32 indirect_ctx_offset; 1897 1898 switch (INTEL_GEN(engine->i915)) { 1899 default: 1900 MISSING_CASE(INTEL_GEN(engine->i915)); 1901 /* fall through */ 1902 case 9: 1903 indirect_ctx_offset = 1904 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 1905 break; 1906 case 8: 1907 indirect_ctx_offset = 1908 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 1909 break; 1910 } 1911 1912 return indirect_ctx_offset; 1913 } 1914 1915 static void execlists_init_reg_state(u32 *reg_state, 1916 struct i915_gem_context *ctx, 1917 struct intel_engine_cs *engine, 1918 struct intel_ring *ring) 1919 { 1920 struct drm_i915_private *dev_priv = engine->i915; 1921 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; 1922 1923 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM 1924 * commands followed by (reg, value) pairs. The values we are setting here are 1925 * only for the first context restore: on a subsequent save, the GPU will 1926 * recreate this batchbuffer with new values (including all the missing 1927 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 1928 reg_state[CTX_LRI_HEADER_0] = 1929 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; 1930 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, 1931 RING_CONTEXT_CONTROL(engine), 1932 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 1933 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 1934 (HAS_RESOURCE_STREAMER(dev_priv) ? 
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       RING_CTL_SIZE(ring->size) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.vma) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/*
		 * 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/*
		 * 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point.
		 * Point the unallocated PDPs to the scratch page.
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}
}

static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ring *ring)
{
	void *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
	ctx_obj->dirty = true;

	/*
	 * The second page of the context object contains some fields which
	 * must be set up prior to the first execution.
	 */
	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
				 ctx, engine, ring);

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
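 * (execlists_context_deferred_alloc() below, for example, rounds this value
 * up to a page multiple and then adds PAGE_SIZE * LRC_PPHWSP_PN on top for
 * that shared data.)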
2071 */ 2072 uint32_t intel_lr_context_size(struct intel_engine_cs *engine) 2073 { 2074 int ret = 0; 2075 2076 WARN_ON(INTEL_GEN(engine->i915) < 8); 2077 2078 switch (engine->id) { 2079 case RCS: 2080 if (INTEL_GEN(engine->i915) >= 9) 2081 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2082 else 2083 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2084 break; 2085 case VCS: 2086 case BCS: 2087 case VECS: 2088 case VCS2: 2089 ret = GEN8_LR_CONTEXT_OTHER_SIZE; 2090 break; 2091 } 2092 2093 return ret; 2094 } 2095 2096 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, 2097 struct intel_engine_cs *engine) 2098 { 2099 struct drm_i915_gem_object *ctx_obj; 2100 struct intel_context *ce = &ctx->engine[engine->id]; 2101 struct i915_vma *vma; 2102 uint32_t context_size; 2103 struct intel_ring *ring; 2104 int ret; 2105 2106 WARN_ON(ce->state); 2107 2108 context_size = round_up(intel_lr_context_size(engine), 4096); 2109 2110 /* One extra page as the sharing data between driver and GuC */ 2111 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2112 2113 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); 2114 if (IS_ERR(ctx_obj)) { 2115 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2116 return PTR_ERR(ctx_obj); 2117 } 2118 2119 vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL); 2120 if (IS_ERR(vma)) { 2121 ret = PTR_ERR(vma); 2122 goto error_deref_obj; 2123 } 2124 2125 ring = intel_engine_create_ring(engine, ctx->ring_size); 2126 if (IS_ERR(ring)) { 2127 ret = PTR_ERR(ring); 2128 goto error_deref_obj; 2129 } 2130 2131 ret = populate_lr_context(ctx, ctx_obj, engine, ring); 2132 if (ret) { 2133 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2134 goto error_ring_free; 2135 } 2136 2137 ce->ring = ring; 2138 ce->state = vma; 2139 ce->initialised = engine->init_context == NULL; 2140 2141 return 0; 2142 2143 error_ring_free: 2144 intel_ring_free(ring); 2145 error_deref_obj: 2146 i915_gem_object_put(ctx_obj); 2147 return ret; 2148 } 2149 2150 void intel_lr_context_resume(struct drm_i915_private *dev_priv) 2151 { 2152 struct intel_engine_cs *engine; 2153 struct i915_gem_context *ctx; 2154 enum intel_engine_id id; 2155 2156 /* Because we emit WA_TAIL_DWORDS there may be a disparity 2157 * between our bookkeeping in ce->ring->head and ce->ring->tail and 2158 * that stored in context. As we only write new commands from 2159 * ce->ring->tail onwards, everything before that is junk. If the GPU 2160 * starts reading from its RING_HEAD from the context, it may try to 2161 * execute that junk and die. 2162 * 2163 * So to avoid that we reset the context images upon resume. For 2164 * simplicity, we just zero everything out. 2165 */ 2166 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2167 for_each_engine(engine, dev_priv, id) { 2168 struct intel_context *ce = &ctx->engine[engine->id]; 2169 u32 *reg; 2170 2171 if (!ce->state) 2172 continue; 2173 2174 reg = i915_gem_object_pin_map(ce->state->obj, 2175 I915_MAP_WB); 2176 if (WARN_ON(IS_ERR(reg))) 2177 continue; 2178 2179 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg); 2180 reg[CTX_RING_HEAD+1] = 0; 2181 reg[CTX_RING_TAIL+1] = 0; 2182 2183 ce->state->obj->dirty = true; 2184 i915_gem_object_unpin_map(ce->state->obj); 2185 2186 ce->ring->head = ce->ring->tail = 0; 2187 ce->ring->last_retired_head = -1; 2188 intel_ring_update_space(ce->ring); 2189 } 2190 } 2191 } 2192