/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things in the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we just need one set of those per engine command streamer? This
 * is where the name "Logical Rings" starts to make sense: by virtualizing
 * the rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use it. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object
 * and so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but is instead kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch
 * interrupt. During the interrupt handling, the driver examines the events in
 * the buffer: for each context complete event, if the announced ID matches
 * that on the head of the request queue, then that request is retired and
 * removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice
 * in an execution list, if subsequent requests have the same ID as the first
 * then the two requests must be combined. This is done simply by discarding
 * requests at the head of the queue until either only one request is left (in
 * which case we use a NULL second context) or the first two requests have
 * unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this
 * second context will be at the head of the queue when we remove the first
 * one.
This 127 * request will then be resubmitted along with a new request for a different context, 128 * which will cause the hardware to continue executing the second request and queue 129 * the new request (the GPU detects the condition of a context getting preempted 130 * with the same context and optimizes the context switch flow by not doing 131 * preemption, but just sampling the new tail pointer). 132 * 133 */ 134 #include <linux/interrupt.h> 135 136 #include <drm/drmP.h> 137 #include <drm/i915_drm.h> 138 #include "i915_drv.h" 139 #include "intel_mocs.h" 140 141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) 144 145 #define RING_EXECLIST_QFULL (1 << 0x2) 146 #define RING_EXECLIST1_VALID (1 << 0x3) 147 #define RING_EXECLIST0_VALID (1 << 0x4) 148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) 149 #define RING_EXECLIST1_ACTIVE (1 << 0x11) 150 #define RING_EXECLIST0_ACTIVE (1 << 0x12) 151 152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) 153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) 154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) 155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) 156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4) 157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) 158 159 #define CTX_LRI_HEADER_0 0x01 160 #define CTX_CONTEXT_CONTROL 0x02 161 #define CTX_RING_HEAD 0x04 162 #define CTX_RING_TAIL 0x06 163 #define CTX_RING_BUFFER_START 0x08 164 #define CTX_RING_BUFFER_CONTROL 0x0a 165 #define CTX_BB_HEAD_U 0x0c 166 #define CTX_BB_HEAD_L 0x0e 167 #define CTX_BB_STATE 0x10 168 #define CTX_SECOND_BB_HEAD_U 0x12 169 #define CTX_SECOND_BB_HEAD_L 0x14 170 #define CTX_SECOND_BB_STATE 0x16 171 #define CTX_BB_PER_CTX_PTR 0x18 172 #define CTX_RCS_INDIRECT_CTX 0x1a 173 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c 174 #define CTX_LRI_HEADER_1 0x21 175 #define CTX_CTX_TIMESTAMP 0x22 176 #define CTX_PDP3_UDW 0x24 177 #define CTX_PDP3_LDW 0x26 178 #define CTX_PDP2_UDW 0x28 179 #define CTX_PDP2_LDW 0x2a 180 #define CTX_PDP1_UDW 0x2c 181 #define CTX_PDP1_LDW 0x2e 182 #define CTX_PDP0_UDW 0x30 183 #define CTX_PDP0_LDW 0x32 184 #define CTX_LRI_HEADER_2 0x41 185 #define CTX_R_PWR_CLK_STATE 0x42 186 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 187 188 #define GEN8_CTX_VALID (1<<0) 189 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1) 190 #define GEN8_CTX_FORCE_RESTORE (1<<2) 191 #define GEN8_CTX_L3LLC_COHERENT (1<<5) 192 #define GEN8_CTX_PRIVILEGE (1<<8) 193 194 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \ 195 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \ 196 (reg_state)[(pos)+1] = (val); \ 197 } while (0) 198 199 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ 200 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ 201 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ 202 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ 203 } while (0) 204 205 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ 206 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ 207 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ 208 } while (0) 209 210 enum { 211 ADVANCED_CONTEXT = 0, 212 LEGACY_32B_CONTEXT, 213 ADVANCED_AD_CONTEXT, 214 LEGACY_64B_CONTEXT 215 }; 216 #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 217 #define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ 218 LEGACY_64B_CONTEXT :\ 219 LEGACY_32B_CONTEXT) 220 enum { 221 FAULT_AND_HANG = 0, 222 FAULT_AND_HALT, /* Debug only */ 223 FAULT_AND_STREAM, 224 FAULT_AND_CONTINUE /* 
Unsupported */ 225 }; 226 #define GEN8_CTX_ID_SHIFT 32 227 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 228 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 229 230 static int intel_lr_context_pin(struct intel_context *ctx, 231 struct intel_engine_cs *engine); 232 233 /** 234 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 235 * @dev: DRM device. 236 * @enable_execlists: value of i915.enable_execlists module parameter. 237 * 238 * Only certain platforms support Execlists (the prerequisites being 239 * support for Logical Ring Contexts and Aliasing PPGTT or better). 240 * 241 * Return: 1 if Execlists is supported and has to be enabled. 242 */ 243 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) 244 { 245 WARN_ON(i915.enable_ppgtt == -1); 246 247 /* On platforms with execlist available, vGPU will only 248 * support execlist mode, no ring buffer mode. 249 */ 250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) 251 return 1; 252 253 if (INTEL_INFO(dev)->gen >= 9) 254 return 1; 255 256 if (enable_execlists == 0) 257 return 0; 258 259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && 260 i915.use_mmio_flip >= 0) 261 return 1; 262 263 return 0; 264 } 265 266 static void 267 logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 268 { 269 struct drm_device *dev = engine->dev; 270 271 if (IS_GEN8(dev) || IS_GEN9(dev)) 272 engine->idle_lite_restore_wa = ~0; 273 274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 276 (engine->id == VCS || engine->id == VCS2); 277 278 engine->ctx_desc_template = GEN8_CTX_VALID; 279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 280 GEN8_CTX_ADDRESSING_MODE_SHIFT; 281 if (IS_GEN8(dev)) 282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 284 285 /* TODO: WaDisableLiteRestore when we start using semaphore 286 * signalling between Command Streamers */ 287 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */ 288 289 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 290 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ 291 if (engine->disable_lite_restore_wa) 292 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; 293 } 294 295 /** 296 * intel_lr_context_descriptor_update() - calculate & cache the descriptor 297 * descriptor for a pinned context 298 * 299 * @ctx: Context to work on 300 * @ring: Engine the descriptor will be used with 301 * 302 * The context descriptor encodes various attributes of a context, 303 * including its GTT address and some flags. Because it's fairly 304 * expensive to calculate, we'll just do it once and cache the result, 305 * which remains valid until the context is unpinned. 306 * 307 * This is what a descriptor looks like, from LSB to MSB: 308 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 309 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 310 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 
311 * bits 52-63: reserved, may encode the engine ID (for GuC) 312 */ 313 static void 314 intel_lr_context_descriptor_update(struct intel_context *ctx, 315 struct intel_engine_cs *engine) 316 { 317 uint64_t lrca, desc; 318 319 lrca = ctx->engine[engine->id].lrc_vma->node.start + 320 LRC_PPHWSP_PN * PAGE_SIZE; 321 322 desc = engine->ctx_desc_template; /* bits 0-11 */ 323 desc |= lrca; /* bits 12-31 */ 324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 325 326 ctx->engine[engine->id].lrc_desc = desc; 327 } 328 329 uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 330 struct intel_engine_cs *engine) 331 { 332 return ctx->engine[engine->id].lrc_desc; 333 } 334 335 /** 336 * intel_execlists_ctx_id() - get the Execlists Context ID 337 * @ctx: Context to get the ID for 338 * @ring: Engine to get the ID for 339 * 340 * Do not confuse with ctx->id! Unfortunately we have a name overload 341 * here: the old context ID we pass to userspace as a handler so that 342 * they can refer to a context, and the new context ID we pass to the 343 * ELSP so that the GPU can inform us of the context status via 344 * interrupts. 345 * 346 * The context ID is a portion of the context descriptor, so we can 347 * just extract the required part from the cached descriptor. 348 * 349 * Return: 20-bits globally unique context ID. 350 */ 351 u32 intel_execlists_ctx_id(struct intel_context *ctx, 352 struct intel_engine_cs *engine) 353 { 354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT; 355 } 356 357 static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 358 struct drm_i915_gem_request *rq1) 359 { 360 361 struct intel_engine_cs *engine = rq0->engine; 362 struct drm_device *dev = engine->dev; 363 struct drm_i915_private *dev_priv = dev->dev_private; 364 uint64_t desc[2]; 365 366 if (rq1) { 367 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine); 368 rq1->elsp_submitted++; 369 } else { 370 desc[1] = 0; 371 } 372 373 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine); 374 rq0->elsp_submitted++; 375 376 /* You must always write both descriptors in the order below. */ 377 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1])); 378 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1])); 379 380 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0])); 381 /* The context is automatically loaded after the following */ 382 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0])); 383 384 /* ELSP is a wo register, use another nearby reg for posting */ 385 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine)); 386 } 387 388 static void 389 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) 390 { 391 ASSIGN_CTX_PDP(ppgtt, reg_state, 3); 392 ASSIGN_CTX_PDP(ppgtt, reg_state, 2); 393 ASSIGN_CTX_PDP(ppgtt, reg_state, 1); 394 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 395 } 396 397 static void execlists_update_context(struct drm_i915_gem_request *rq) 398 { 399 struct intel_engine_cs *engine = rq->engine; 400 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; 401 uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state; 402 403 reg_state[CTX_RING_TAIL+1] = rq->tail; 404 405 /* True 32b PPGTT with dynamic page allocation: update PDP 406 * registers and point the unallocated PDPs to scratch page. 407 * PML4 is allocated during ppgtt init, so this is not needed 408 * in 48-bit mode. 
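 *
 * As a rough illustration, the PDP0 case of the ASSIGN_CTX_PDP() calls made
 * from execlists_update_context_pdps() expands to:
 *
 *   const u64 _addr = i915_page_dir_dma_addr(ppgtt, 0);
 *   reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(_addr);
 *   reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(_addr);
 *
 * i.e. only the value dword following each LRI register offset is rewritten;
 * the register offsets themselves (see ASSIGN_CTX_REG) were already written
 * when the context image was first populated.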
409 */ 410 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 411 execlists_update_context_pdps(ppgtt, reg_state); 412 } 413 414 static void execlists_submit_requests(struct drm_i915_gem_request *rq0, 415 struct drm_i915_gem_request *rq1) 416 { 417 struct drm_i915_private *dev_priv = rq0->i915; 418 unsigned int fw_domains = rq0->engine->fw_domains; 419 420 execlists_update_context(rq0); 421 422 if (rq1) 423 execlists_update_context(rq1); 424 425 spin_lock_irq(&dev_priv->uncore.lock); 426 intel_uncore_forcewake_get__locked(dev_priv, fw_domains); 427 428 execlists_elsp_write(rq0, rq1); 429 430 intel_uncore_forcewake_put__locked(dev_priv, fw_domains); 431 spin_unlock_irq(&dev_priv->uncore.lock); 432 } 433 434 static void execlists_context_unqueue(struct intel_engine_cs *engine) 435 { 436 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; 437 struct drm_i915_gem_request *cursor, *tmp; 438 439 assert_spin_locked(&engine->execlist_lock); 440 441 /* 442 * If irqs are not active generate a warning as batches that finish 443 * without the irqs may get lost and a GPU Hang may occur. 444 */ 445 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); 446 447 /* Try to read in pairs */ 448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, 449 execlist_link) { 450 if (!req0) { 451 req0 = cursor; 452 } else if (req0->ctx == cursor->ctx) { 453 /* Same ctx: ignore first request, as second request 454 * will update tail past first request's workload */ 455 cursor->elsp_submitted = req0->elsp_submitted; 456 list_move_tail(&req0->execlist_link, 457 &engine->execlist_retired_req_list); 458 req0 = cursor; 459 } else { 460 req1 = cursor; 461 WARN_ON(req1->elsp_submitted); 462 break; 463 } 464 } 465 466 if (unlikely(!req0)) 467 return; 468 469 if (req0->elsp_submitted & engine->idle_lite_restore_wa) { 470 /* 471 * WaIdleLiteRestore: make sure we never cause a lite restore 472 * with HEAD==TAIL. 473 * 474 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we 475 * resubmit the request. See gen8_emit_request() for where we 476 * prepare the padding after the end of the request. 
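 *
 * The 8 bytes added to req0->tail below are exactly the two MI_NOOP dwords
 * of padding (2 * sizeof(uint32_t)) written at the end of every request by
 * intel_logical_ring_advance_and_submit(), so the TAIL we resubmit always
 * differs from the TAIL of the original submission.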
477 */ 478 struct intel_ringbuffer *ringbuf; 479 480 ringbuf = req0->ctx->engine[engine->id].ringbuf; 481 req0->tail += 8; 482 req0->tail &= ringbuf->size - 1; 483 } 484 485 execlists_submit_requests(req0, req1); 486 } 487 488 static unsigned int 489 execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) 490 { 491 struct drm_i915_gem_request *head_req; 492 493 assert_spin_locked(&engine->execlist_lock); 494 495 head_req = list_first_entry_or_null(&engine->execlist_queue, 496 struct drm_i915_gem_request, 497 execlist_link); 498 499 if (!head_req) 500 return 0; 501 502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) 503 return 0; 504 505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 506 507 if (--head_req->elsp_submitted > 0) 508 return 0; 509 510 list_move_tail(&head_req->execlist_link, 511 &engine->execlist_retired_req_list); 512 513 return 1; 514 } 515 516 static u32 517 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, 518 u32 *context_id) 519 { 520 struct drm_i915_private *dev_priv = engine->dev->dev_private; 521 u32 status; 522 523 read_pointer %= GEN8_CSB_ENTRIES; 524 525 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer)); 526 527 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 528 return 0; 529 530 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine, 531 read_pointer)); 532 533 return status; 534 } 535 536 /** 537 * intel_lrc_irq_handler() - handle Context Switch interrupts 538 * @engine: Engine Command Streamer to handle. 539 * 540 * Check the unread Context Status Buffers and manage the submission of new 541 * contexts to the ELSP accordingly. 542 */ 543 static void intel_lrc_irq_handler(unsigned long data) 544 { 545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 546 struct drm_i915_private *dev_priv = engine->dev->dev_private; 547 u32 status_pointer; 548 unsigned int read_pointer, write_pointer; 549 u32 csb[GEN8_CSB_ENTRIES][2]; 550 unsigned int csb_read = 0, i; 551 unsigned int submit_contexts = 0; 552 553 intel_uncore_forcewake_get(dev_priv, engine->fw_domains); 554 555 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine)); 556 557 read_pointer = engine->next_context_status_buffer; 558 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); 559 if (read_pointer > write_pointer) 560 write_pointer += GEN8_CSB_ENTRIES; 561 562 while (read_pointer < write_pointer) { 563 if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES)) 564 break; 565 csb[csb_read][0] = get_context_status(engine, ++read_pointer, 566 &csb[csb_read][1]); 567 csb_read++; 568 } 569 570 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; 571 572 /* Update the read pointer to the old write pointer. 
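 * For example, with GEN8_CSB_ENTRIES == 6, a driver-side read pointer of 4
 * and a hardware write pointer of 1: write_pointer is bumped to 7, the loop
 * above consumes CSB entries 5, 0 and 1 (the modulo is applied in
 * get_context_status()), and the value written back below is 7 % 6 == 1.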
Manual ringbuffer 573 * management ftw </sarcasm> */ 574 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine), 575 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 576 engine->next_context_status_buffer << 8)); 577 578 intel_uncore_forcewake_put(dev_priv, engine->fw_domains); 579 580 lockmgr(&engine->execlist_lock, LK_EXCLUSIVE); 581 582 for (i = 0; i < csb_read; i++) { 583 if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) { 584 if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) { 585 if (execlists_check_remove_request(engine, csb[i][1])) 586 WARN(1, "Lite Restored request removed from queue\n"); 587 } else 588 WARN(1, "Preemption without Lite Restore\n"); 589 } 590 591 if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE | 592 GEN8_CTX_STATUS_ELEMENT_SWITCH)) 593 submit_contexts += 594 execlists_check_remove_request(engine, csb[i][1]); 595 } 596 597 if (submit_contexts) { 598 if (!engine->disable_lite_restore_wa || 599 (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE)) 600 execlists_context_unqueue(engine); 601 } 602 603 lockmgr(&engine->execlist_lock, LK_RELEASE); 604 605 if (unlikely(submit_contexts > 2)) 606 DRM_ERROR("More than two context complete events?\n"); 607 } 608 609 static void execlists_context_queue(struct drm_i915_gem_request *request) 610 { 611 struct intel_engine_cs *engine = request->engine; 612 struct drm_i915_gem_request *cursor; 613 int num_elements = 0; 614 615 if (request->ctx != request->i915->kernel_context) 616 intel_lr_context_pin(request->ctx, engine); 617 618 i915_gem_request_reference(request); 619 620 spin_lock_bh(&engine->execlist_lock); 621 622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) 623 if (++num_elements > 2) 624 break; 625 626 if (num_elements > 2) { 627 struct drm_i915_gem_request *tail_req; 628 629 tail_req = list_last_entry(&engine->execlist_queue, 630 struct drm_i915_gem_request, 631 execlist_link); 632 633 if (request->ctx == tail_req->ctx) { 634 WARN(tail_req->elsp_submitted != 0, 635 "More than 2 already-submitted reqs queued\n"); 636 list_move_tail(&tail_req->execlist_link, 637 &engine->execlist_retired_req_list); 638 } 639 } 640 641 list_add_tail(&request->execlist_link, &engine->execlist_queue); 642 if (num_elements == 0) 643 execlists_context_unqueue(engine); 644 645 spin_unlock_bh(&engine->execlist_lock); 646 } 647 648 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 649 { 650 struct intel_engine_cs *engine = req->engine; 651 uint32_t flush_domains; 652 int ret; 653 654 flush_domains = 0; 655 if (engine->gpu_caches_dirty) 656 flush_domains = I915_GEM_GPU_DOMAINS; 657 658 ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); 659 if (ret) 660 return ret; 661 662 engine->gpu_caches_dirty = false; 663 return 0; 664 } 665 666 static int execlists_move_to_gpu(struct drm_i915_gem_request *req, 667 struct list_head *vmas) 668 { 669 const unsigned other_rings = ~intel_engine_flag(req->engine); 670 struct i915_vma *vma; 671 uint32_t flush_domains = 0; 672 bool flush_chipset = false; 673 int ret; 674 675 list_for_each_entry(vma, vmas, exec_list) { 676 struct drm_i915_gem_object *obj = vma->obj; 677 678 if (obj->active & other_rings) { 679 ret = i915_gem_object_sync(obj, req->engine, &req); 680 if (ret) 681 return ret; 682 } 683 684 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 685 flush_chipset |= i915_gem_clflush_object(obj, false); 686 687 flush_domains |= obj->base.write_domain; 688 } 689 690 if (flush_domains & I915_GEM_DOMAIN_GTT) 691 wmb(); 692 693 /* Unconditionally invalidate gpu caches and ensure that 
we do flush 694 * any residual writes from the previous batch. 695 */ 696 return logical_ring_invalidate_all_caches(req); 697 } 698 699 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 700 { 701 int ret = 0; 702 703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; 704 705 if (i915.enable_guc_submission) { 706 /* 707 * Check that the GuC has space for the request before 708 * going any further, as the i915_add_request() call 709 * later on mustn't fail ... 710 */ 711 struct intel_guc *guc = &request->i915->guc; 712 713 ret = i915_guc_wq_check_space(guc->execbuf_client); 714 if (ret) 715 return ret; 716 } 717 718 if (request->ctx != request->i915->kernel_context) 719 ret = intel_lr_context_pin(request->ctx, request->engine); 720 721 return ret; 722 } 723 724 /* 725 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload 726 * @request: Request to advance the logical ringbuffer of. 727 * 728 * The tail is updated in our logical ringbuffer struct, not in the actual context. What 729 * really happens during submission is that the context and current tail will be placed 730 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that 731 * point, the tail *inside* the context is updated and the ELSP written to. 732 */ 733 static int 734 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 735 { 736 struct intel_ringbuffer *ringbuf = request->ringbuf; 737 struct drm_i915_private *dev_priv = request->i915; 738 struct intel_engine_cs *engine = request->engine; 739 740 intel_logical_ring_advance(ringbuf); 741 request->tail = ringbuf->tail; 742 743 /* 744 * Here we add two extra NOOPs as padding to avoid 745 * lite restore of a context with HEAD==TAIL. 746 * 747 * Caller must reserve WA_TAIL_DWORDS for us! 748 */ 749 intel_logical_ring_emit(ringbuf, MI_NOOP); 750 intel_logical_ring_emit(ringbuf, MI_NOOP); 751 intel_logical_ring_advance(ringbuf); 752 753 if (intel_engine_stopped(engine)) 754 return 0; 755 756 if (engine->last_context != request->ctx) { 757 if (engine->last_context) 758 intel_lr_context_unpin(engine->last_context, engine); 759 if (request->ctx != request->i915->kernel_context) { 760 intel_lr_context_pin(request->ctx, engine); 761 engine->last_context = request->ctx; 762 } else { 763 engine->last_context = NULL; 764 } 765 } 766 767 if (dev_priv->guc.execbuf_client) 768 i915_guc_submit(dev_priv->guc.execbuf_client, request); 769 else 770 execlists_context_queue(request); 771 772 return 0; 773 } 774 775 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) 776 { 777 /* 778 * The first call merely notes the reserve request and is common for 779 * all back ends. The subsequent localised _begin() call actually 780 * ensures that the reservation is available. Without the begin, if 781 * the request creator immediately submitted the request without 782 * adding any commands to it then there might not actually be 783 * sufficient room for the submission commands. 784 */ 785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 786 787 return intel_ring_begin(request, 0); 788 } 789 790 /** 791 * execlists_submission() - submit a batchbuffer for execution, Execlists style 792 * @dev: DRM device. 793 * @file: DRM file. 794 * @ring: Engine Command Streamer to submit to. 795 * @ctx: Context to employ for this submission. 796 * @args: execbuffer call arguments. 797 * @vmas: list of vmas. 
798 * @batch_obj: the batchbuffer to submit. 799 * @exec_start: batchbuffer start virtual address pointer. 800 * @dispatch_flags: translated execbuffer call flags. 801 * 802 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts 803 * away the submission details of the execbuffer ioctl call. 804 * 805 * Return: non-zero if the submission fails. 806 */ 807 int intel_execlists_submission(struct i915_execbuffer_params *params, 808 struct drm_i915_gem_execbuffer2 *args, 809 struct list_head *vmas) 810 { 811 struct drm_device *dev = params->dev; 812 struct intel_engine_cs *engine = params->engine; 813 struct drm_i915_private *dev_priv = dev->dev_private; 814 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; 815 u64 exec_start; 816 int instp_mode; 817 u32 instp_mask; 818 int ret; 819 820 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; 821 instp_mask = I915_EXEC_CONSTANTS_MASK; 822 switch (instp_mode) { 823 case I915_EXEC_CONSTANTS_REL_GENERAL: 824 case I915_EXEC_CONSTANTS_ABSOLUTE: 825 case I915_EXEC_CONSTANTS_REL_SURFACE: 826 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { 827 DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); 828 return -EINVAL; 829 } 830 831 if (instp_mode != dev_priv->relative_constants_mode) { 832 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { 833 DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); 834 return -EINVAL; 835 } 836 837 /* The HW changed the meaning on this bit on gen6 */ 838 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; 839 } 840 break; 841 default: 842 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); 843 return -EINVAL; 844 } 845 846 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 847 DRM_DEBUG("sol reset is gen7 only\n"); 848 return -EINVAL; 849 } 850 851 ret = execlists_move_to_gpu(params->request, vmas); 852 if (ret) 853 return ret; 854 855 if (engine == &dev_priv->engine[RCS] && 856 instp_mode != dev_priv->relative_constants_mode) { 857 ret = intel_ring_begin(params->request, 4); 858 if (ret) 859 return ret; 860 861 intel_logical_ring_emit(ringbuf, MI_NOOP); 862 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); 863 intel_logical_ring_emit_reg(ringbuf, INSTPM); 864 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); 865 intel_logical_ring_advance(ringbuf); 866 867 dev_priv->relative_constants_mode = instp_mode; 868 } 869 870 exec_start = params->batch_obj_vm_offset + 871 args->batch_start_offset; 872 873 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags); 874 if (ret) 875 return ret; 876 877 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); 878 879 i915_gem_execbuffer_move_to_active(vmas, params->request); 880 881 return 0; 882 } 883 884 void intel_execlists_retire_requests(struct intel_engine_cs *engine) 885 { 886 struct drm_i915_gem_request *req, *tmp; 887 struct list_head retired_list; 888 889 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 890 if (list_empty(&engine->execlist_retired_req_list)) 891 return; 892 893 INIT_LIST_HEAD(&retired_list); 894 spin_lock_bh(&engine->execlist_lock); 895 list_replace_init(&engine->execlist_retired_req_list, &retired_list); 896 spin_unlock_bh(&engine->execlist_lock); 897 898 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 899 struct intel_context *ctx = req->ctx; 900 struct drm_i915_gem_object *ctx_obj = 901 ctx->engine[engine->id].state; 902 903 if (ctx_obj && (ctx != req->i915->kernel_context)) 904 intel_lr_context_unpin(ctx, 
engine); 905 906 list_del(&req->execlist_link); 907 i915_gem_request_unreference(req); 908 } 909 } 910 911 void intel_logical_ring_stop(struct intel_engine_cs *engine) 912 { 913 struct drm_i915_private *dev_priv = engine->dev->dev_private; 914 int ret; 915 916 if (!intel_engine_initialized(engine)) 917 return; 918 919 ret = intel_engine_idle(engine); 920 if (ret) 921 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 922 engine->name, ret); 923 924 /* TODO: Is this correct with Execlists enabled? */ 925 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 926 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 927 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); 928 return; 929 } 930 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 931 } 932 933 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) 934 { 935 struct intel_engine_cs *engine = req->engine; 936 int ret; 937 938 if (!engine->gpu_caches_dirty) 939 return 0; 940 941 ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); 942 if (ret) 943 return ret; 944 945 engine->gpu_caches_dirty = false; 946 return 0; 947 } 948 949 static int intel_lr_context_do_pin(struct intel_context *ctx, 950 struct intel_engine_cs *engine) 951 { 952 struct drm_device *dev = engine->dev; 953 struct drm_i915_private *dev_priv = dev->dev_private; 954 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 955 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; 956 char *vaddr; 957 u32 *lrc_reg_state; 958 int ret; 959 960 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 961 962 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 963 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 964 if (ret) 965 return ret; 966 967 vaddr = i915_gem_object_pin_map(ctx_obj); 968 if (IS_ERR(vaddr)) { 969 ret = PTR_ERR(vaddr); 970 goto unpin_ctx_obj; 971 } 972 973 lrc_reg_state = (u32 *)(vaddr + LRC_STATE_PN * PAGE_SIZE); 974 975 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); 976 if (ret) 977 goto unpin_map; 978 979 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 980 intel_lr_context_descriptor_update(ctx, engine); 981 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 982 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; 983 ctx_obj->dirty = true; 984 985 /* Invalidate GuC TLB. 
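 * The context image and its ringbuffer have just been (re)mapped into the
 * GGTT above, so any stale translations the GuC may still hold for that
 * range are dropped here before it dereferences them.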
*/ 986 if (i915.enable_guc_submission) 987 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 988 989 return ret; 990 991 unpin_map: 992 i915_gem_object_unpin_map(ctx_obj); 993 unpin_ctx_obj: 994 i915_gem_object_ggtt_unpin(ctx_obj); 995 996 return ret; 997 } 998 999 static int intel_lr_context_pin(struct intel_context *ctx, 1000 struct intel_engine_cs *engine) 1001 { 1002 int ret = 0; 1003 1004 if (ctx->engine[engine->id].pin_count++ == 0) { 1005 ret = intel_lr_context_do_pin(ctx, engine); 1006 if (ret) 1007 goto reset_pin_count; 1008 1009 i915_gem_context_reference(ctx); 1010 } 1011 return ret; 1012 1013 reset_pin_count: 1014 ctx->engine[engine->id].pin_count = 0; 1015 return ret; 1016 } 1017 1018 void intel_lr_context_unpin(struct intel_context *ctx, 1019 struct intel_engine_cs *engine) 1020 { 1021 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 1022 1023 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1024 if (--ctx->engine[engine->id].pin_count == 0) { 1025 i915_gem_object_unpin_map(ctx_obj); 1026 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); 1027 i915_gem_object_ggtt_unpin(ctx_obj); 1028 ctx->engine[engine->id].lrc_vma = NULL; 1029 ctx->engine[engine->id].lrc_desc = 0; 1030 ctx->engine[engine->id].lrc_reg_state = NULL; 1031 1032 i915_gem_context_unreference(ctx); 1033 } 1034 } 1035 1036 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 1037 { 1038 int ret, i; 1039 struct intel_engine_cs *engine = req->engine; 1040 struct intel_ringbuffer *ringbuf = req->ringbuf; 1041 struct drm_device *dev = engine->dev; 1042 struct drm_i915_private *dev_priv = dev->dev_private; 1043 struct i915_workarounds *w = &dev_priv->workarounds; 1044 1045 if (w->count == 0) 1046 return 0; 1047 1048 engine->gpu_caches_dirty = true; 1049 ret = logical_ring_flush_all_caches(req); 1050 if (ret) 1051 return ret; 1052 1053 ret = intel_ring_begin(req, w->count * 2 + 2); 1054 if (ret) 1055 return ret; 1056 1057 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); 1058 for (i = 0; i < w->count; i++) { 1059 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr); 1060 intel_logical_ring_emit(ringbuf, w->reg[i].value); 1061 } 1062 intel_logical_ring_emit(ringbuf, MI_NOOP); 1063 1064 intel_logical_ring_advance(ringbuf); 1065 1066 engine->gpu_caches_dirty = true; 1067 ret = logical_ring_flush_all_caches(req); 1068 if (ret) 1069 return ret; 1070 1071 return 0; 1072 } 1073 1074 #define wa_ctx_emit(batch, index, cmd) \ 1075 do { \ 1076 int __index = (index)++; \ 1077 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \ 1078 return -ENOSPC; \ 1079 } \ 1080 batch[__index] = (cmd); \ 1081 } while (0) 1082 1083 #define wa_ctx_emit_reg(batch, index, reg) \ 1084 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg)) 1085 1086 /* 1087 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after 1088 * PIPE_CONTROL instruction. This is required for the flush to happen correctly 1089 * but there is a slight complication as this is applied in WA batch where the 1090 * values are only initialized once so we cannot take register value at the 1091 * beginning and reuse it further; hence we save its value to memory, upload a 1092 * constant value with bit21 set and then we restore it back with the saved value. 1093 * To simplify the WA, a constant value is formed by using the default value 1094 * of this register. This shouldn't be a problem because we are only modifying 1095 * it for a short period and this batch in non-premptible. 
We can ofcourse 1096 * use additional instructions that read the actual value of the register 1097 * at that time and set our bit of interest but it makes the WA complicated. 1098 * 1099 * This WA is also required for Gen9 so extracting as a function avoids 1100 * code duplication. 1101 */ 1102 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, 1103 uint32_t *const batch, 1104 uint32_t index) 1105 { 1106 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1107 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 1108 1109 /* 1110 * WaDisableLSQCROPERFforOCL:skl,kbl 1111 * This WA is implemented in skl_init_clock_gating() but since 1112 * this batch updates GEN8_L3SQCREG4 with default value we need to 1113 * set this bit here to retain the WA during flush. 1114 */ 1115 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) || 1116 IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 1117 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1118 1119 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1120 MI_SRM_LRM_GLOBAL_GTT)); 1121 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1122 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); 1123 wa_ctx_emit(batch, index, 0); 1124 1125 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1126 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1127 wa_ctx_emit(batch, index, l3sqc4_flush); 1128 1129 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1130 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL | 1131 PIPE_CONTROL_DC_FLUSH_ENABLE)); 1132 wa_ctx_emit(batch, index, 0); 1133 wa_ctx_emit(batch, index, 0); 1134 wa_ctx_emit(batch, index, 0); 1135 wa_ctx_emit(batch, index, 0); 1136 1137 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 1138 MI_SRM_LRM_GLOBAL_GTT)); 1139 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1140 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); 1141 wa_ctx_emit(batch, index, 0); 1142 1143 return index; 1144 } 1145 1146 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx, 1147 uint32_t offset, 1148 uint32_t start_alignment) 1149 { 1150 return wa_ctx->offset = ALIGN(offset, start_alignment); 1151 } 1152 1153 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, 1154 uint32_t offset, 1155 uint32_t size_alignment) 1156 { 1157 wa_ctx->size = offset - wa_ctx->offset; 1158 1159 WARN(wa_ctx->size % size_alignment, 1160 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n", 1161 wa_ctx->size, size_alignment); 1162 return 0; 1163 } 1164 1165 /** 1166 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA 1167 * 1168 * @ring: only applicable for RCS 1169 * @wa_ctx: structure representing wa_ctx 1170 * offset: specifies start of the batch, should be cache-aligned. This is updated 1171 * with the offset value received as input. 1172 * size: size of the batch in DWORDS but HW expects in terms of cachelines 1173 * @batch: page in which WA are loaded 1174 * @offset: This field specifies the start of the batch, it should be 1175 * cache-aligned otherwise it is adjusted accordingly. 1176 * Typically we only have one indirect_ctx and per_ctx batch buffer which are 1177 * initialized at the beginning and shared across all contexts but this field 1178 * helps us to have multiple batches at different offsets and select them based 1179 * on a criteria. At the moment this batch always start at the beginning of the page 1180 * and at this point we don't have multiple wa_ctx batch buffers. 
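 *
 * (When the context image is later populated, CTX_RCS_INDIRECT_CTX ends up
 * holding the GGTT address of this batch in its upper bits and its size in
 * cachelines in the low bits, which is why both the start alignment and the
 * size alignment used here are a cacheline.)
 *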
1181 * 1182 * The number of WA applied are not known at the beginning; we use this field 1183 * to return the no of DWORDS written. 1184 * 1185 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END 1186 * so it adds NOOPs as padding to make it cacheline aligned. 1187 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together 1188 * makes a complete batch buffer. 1189 * 1190 * Return: non-zero if we exceed the PAGE_SIZE limit. 1191 */ 1192 1193 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, 1194 struct i915_wa_ctx_bb *wa_ctx, 1195 uint32_t *const batch, 1196 uint32_t *offset) 1197 { 1198 uint32_t scratch_addr; 1199 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1200 1201 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1202 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1203 1204 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1205 if (IS_BROADWELL(engine->dev)) { 1206 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1207 if (rc < 0) 1208 return rc; 1209 index = rc; 1210 } 1211 1212 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1213 /* Actual scratch location is at 128 bytes offset */ 1214 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; 1215 1216 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1217 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1218 PIPE_CONTROL_GLOBAL_GTT_IVB | 1219 PIPE_CONTROL_CS_STALL | 1220 PIPE_CONTROL_QW_WRITE)); 1221 wa_ctx_emit(batch, index, scratch_addr); 1222 wa_ctx_emit(batch, index, 0); 1223 wa_ctx_emit(batch, index, 0); 1224 wa_ctx_emit(batch, index, 0); 1225 1226 /* Pad to end of cacheline */ 1227 while (index % CACHELINE_DWORDS) 1228 wa_ctx_emit(batch, index, MI_NOOP); 1229 1230 /* 1231 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 1232 * execution depends on the length specified in terms of cache lines 1233 * in the register CTX_RCS_INDIRECT_CTX 1234 */ 1235 1236 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1237 } 1238 1239 /** 1240 * gen8_init_perctx_bb() - initialize per ctx batch with WA 1241 * 1242 * @ring: only applicable for RCS 1243 * @wa_ctx: structure representing wa_ctx 1244 * offset: specifies start of the batch, should be cache-aligned. 1245 * size: size of the batch in DWORDS but HW expects in terms of cachelines 1246 * @batch: page in which WA are loaded 1247 * @offset: This field specifies the start of this batch. 1248 * This batch is started immediately after indirect_ctx batch. Since we ensure 1249 * that indirect_ctx ends on a cacheline this batch is aligned automatically. 1250 * 1251 * The number of DWORDS written are returned using this field. 1252 * 1253 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding 1254 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 
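 *
 * Return: 0 on success; if the batch would overflow the page, the
 * wa_ctx_emit() macro makes the function return -ENOSPC instead.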
1255 */ 1256 static int gen8_init_perctx_bb(struct intel_engine_cs *engine, 1257 struct i915_wa_ctx_bb *wa_ctx, 1258 uint32_t *const batch, 1259 uint32_t *offset) 1260 { 1261 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1262 1263 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1264 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1265 1266 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1267 1268 return wa_ctx_end(wa_ctx, *offset = index, 1); 1269 } 1270 1271 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, 1272 struct i915_wa_ctx_bb *wa_ctx, 1273 uint32_t *const batch, 1274 uint32_t *offset) 1275 { 1276 int ret; 1277 struct drm_device *dev = engine->dev; 1278 struct drm_i915_private *dev_priv = dev->dev_private; 1279 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1280 1281 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1282 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1283 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1284 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1285 1286 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1287 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1288 if (ret < 0) 1289 return ret; 1290 index = ret; 1291 1292 /* WaClearSlmSpaceAtContextSwitch:kbl */ 1293 /* Actual scratch location is at 128 bytes offset */ 1294 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { 1295 uint32_t scratch_addr 1296 = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; 1297 1298 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1299 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1300 PIPE_CONTROL_GLOBAL_GTT_IVB | 1301 PIPE_CONTROL_CS_STALL | 1302 PIPE_CONTROL_QW_WRITE)); 1303 wa_ctx_emit(batch, index, scratch_addr); 1304 wa_ctx_emit(batch, index, 0); 1305 wa_ctx_emit(batch, index, 0); 1306 wa_ctx_emit(batch, index, 0); 1307 } 1308 /* Pad to end of cacheline */ 1309 while (index % CACHELINE_DWORDS) 1310 wa_ctx_emit(batch, index, MI_NOOP); 1311 1312 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1313 } 1314 1315 static int gen9_init_perctx_bb(struct intel_engine_cs *engine, 1316 struct i915_wa_ctx_bb *wa_ctx, 1317 uint32_t *const batch, 1318 uint32_t *offset) 1319 { 1320 struct drm_device *dev = engine->dev; 1321 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1322 1323 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1324 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 1325 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1326 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1327 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1328 wa_ctx_emit(batch, index, 1329 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); 1330 wa_ctx_emit(batch, index, MI_NOOP); 1331 } 1332 1333 /* WaClearTdlStateAckDirtyBits:bxt */ 1334 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1335 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1336 1337 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1338 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1339 1340 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1); 1341 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1342 1343 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2); 1344 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); 1345 1346 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2); 1347 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */ 1348 wa_ctx_emit(batch, index, 0x0); 1349 wa_ctx_emit(batch, index, MI_NOOP); 1350 } 
1351 1352 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1353 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1354 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1355 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1356 1357 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1358 1359 return wa_ctx_end(wa_ctx, *offset = index, 1); 1360 } 1361 1362 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) 1363 { 1364 int ret; 1365 1366 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, 1367 PAGE_ALIGN(size)); 1368 if (!engine->wa_ctx.obj) { 1369 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1370 return -ENOMEM; 1371 } 1372 1373 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); 1374 if (ret) { 1375 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", 1376 ret); 1377 drm_gem_object_unreference(&engine->wa_ctx.obj->base); 1378 return ret; 1379 } 1380 1381 return 0; 1382 } 1383 1384 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) 1385 { 1386 if (engine->wa_ctx.obj) { 1387 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj); 1388 drm_gem_object_unreference(&engine->wa_ctx.obj->base); 1389 engine->wa_ctx.obj = NULL; 1390 } 1391 } 1392 1393 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 1394 { 1395 int ret; 1396 uint32_t *batch; 1397 uint32_t offset; 1398 struct page *page; 1399 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 1400 1401 WARN_ON(engine->id != RCS); 1402 1403 /* update this when WA for higher Gen are added */ 1404 if (INTEL_INFO(engine->dev)->gen > 9) { 1405 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1406 INTEL_INFO(engine->dev)->gen); 1407 return 0; 1408 } 1409 1410 /* some WA perform writes to scratch page, ensure it is valid */ 1411 if (engine->scratch.obj == NULL) { 1412 DRM_ERROR("scratch page not allocated for %s\n", engine->name); 1413 return -EINVAL; 1414 } 1415 1416 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE); 1417 if (ret) { 1418 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1419 return ret; 1420 } 1421 1422 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0); 1423 batch = kmap_atomic(page); 1424 offset = 0; 1425 1426 if (INTEL_INFO(engine->dev)->gen == 8) { 1427 ret = gen8_init_indirectctx_bb(engine, 1428 &wa_ctx->indirect_ctx, 1429 batch, 1430 &offset); 1431 if (ret) 1432 goto out; 1433 1434 ret = gen8_init_perctx_bb(engine, 1435 &wa_ctx->per_ctx, 1436 batch, 1437 &offset); 1438 if (ret) 1439 goto out; 1440 } else if (INTEL_INFO(engine->dev)->gen == 9) { 1441 ret = gen9_init_indirectctx_bb(engine, 1442 &wa_ctx->indirect_ctx, 1443 batch, 1444 &offset); 1445 if (ret) 1446 goto out; 1447 1448 ret = gen9_init_perctx_bb(engine, 1449 &wa_ctx->per_ctx, 1450 batch, 1451 &offset); 1452 if (ret) 1453 goto out; 1454 } 1455 1456 out: 1457 kunmap_atomic(batch); 1458 if (ret) 1459 lrc_destroy_wa_ctx_obj(engine); 1460 1461 return ret; 1462 } 1463 1464 static void lrc_init_hws(struct intel_engine_cs *engine) 1465 { 1466 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1467 1468 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1469 (u32)engine->status_page.gfx_addr); 1470 POSTING_READ(RING_HWS_PGA(engine->mmio_base)); 1471 } 1472 1473 static int gen8_init_common_ring(struct intel_engine_cs *engine) 1474 { 1475 struct drm_device *dev = engine->dev; 1476 struct drm_i915_private *dev_priv = dev->dev_private; 1477 unsigned int next_context_status_buffer_hw; 1478 1479 lrc_init_hws(engine); 1480 1481 I915_WRITE_IMR(engine, 1482 ~(engine->irq_enable_mask | 
engine->irq_keep_mask)); 1483 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); 1484 1485 I915_WRITE(RING_MODE_GEN7(engine), 1486 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1487 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1488 POSTING_READ(RING_MODE_GEN7(engine)); 1489 1490 /* 1491 * Instead of resetting the Context Status Buffer (CSB) read pointer to 1492 * zero, we need to read the write pointer from hardware and use its 1493 * value because "this register is power context save restored". 1494 * Effectively, these states have been observed: 1495 * 1496 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | 1497 * BDW | CSB regs not reset | CSB regs reset | 1498 * CHT | CSB regs not reset | CSB regs not reset | 1499 * SKL | ? | ? | 1500 * BXT | ? | ? | 1501 */ 1502 next_context_status_buffer_hw = 1503 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))); 1504 1505 /* 1506 * When the CSB registers are reset (also after power-up / gpu reset), 1507 * CSB write pointer is set to all 1's, which is not valid, use '5' in 1508 * this special case, so the first element read is CSB[0]. 1509 */ 1510 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) 1511 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); 1512 1513 engine->next_context_status_buffer = next_context_status_buffer_hw; 1514 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); 1515 1516 intel_engine_init_hangcheck(engine); 1517 1518 return intel_mocs_init_engine(engine); 1519 } 1520 1521 static int gen8_init_render_ring(struct intel_engine_cs *engine) 1522 { 1523 struct drm_device *dev = engine->dev; 1524 struct drm_i915_private *dev_priv = dev->dev_private; 1525 int ret; 1526 1527 ret = gen8_init_common_ring(engine); 1528 if (ret) 1529 return ret; 1530 1531 /* We need to disable the AsyncFlip performance optimisations in order 1532 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 1533 * programmed to '1' on all products. 
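 * (_MASKED_BIT_ENABLE(bit) writes the bit into both the write-mask in the
 * upper 16 bits and the value in the lower 16 bits of these masked
 * registers, so the writes below only touch the named bits.)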
1534 * 1535 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 1536 */ 1537 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1538 1539 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1540 1541 return init_workarounds_ring(engine); 1542 } 1543 1544 static int gen9_init_render_ring(struct intel_engine_cs *engine) 1545 { 1546 int ret; 1547 1548 ret = gen8_init_common_ring(engine); 1549 if (ret) 1550 return ret; 1551 1552 return init_workarounds_ring(engine); 1553 } 1554 1555 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1556 { 1557 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1558 struct intel_engine_cs *engine = req->engine; 1559 struct intel_ringbuffer *ringbuf = req->ringbuf; 1560 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1561 int i, ret; 1562 1563 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1564 if (ret) 1565 return ret; 1566 1567 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds)); 1568 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { 1569 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1570 1571 intel_logical_ring_emit_reg(ringbuf, 1572 GEN8_RING_PDP_UDW(engine, i)); 1573 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); 1574 intel_logical_ring_emit_reg(ringbuf, 1575 GEN8_RING_PDP_LDW(engine, i)); 1576 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); 1577 } 1578 1579 intel_logical_ring_emit(ringbuf, MI_NOOP); 1580 intel_logical_ring_advance(ringbuf); 1581 1582 return 0; 1583 } 1584 1585 static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1586 u64 offset, unsigned dispatch_flags) 1587 { 1588 struct intel_ringbuffer *ringbuf = req->ringbuf; 1589 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); 1590 int ret; 1591 1592 /* Don't rely in hw updating PDPs, specially in lite-restore. 1593 * Ideally, we should set Force PD Restore in ctx descriptor, 1594 * but we can't. Force Restore would be a second option, but 1595 * it is unsafe in case of lite-restore (because the ctx is 1596 * not idle). PML4 is allocated during ppgtt init so this is 1597 * not needed in 48-bit.*/ 1598 if (req->ctx->ppgtt && 1599 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1600 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1601 !intel_vgpu_active(req->i915->dev)) { 1602 ret = intel_logical_ring_emit_pdps(req); 1603 if (ret) 1604 return ret; 1605 } 1606 1607 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1608 } 1609 1610 ret = intel_ring_begin(req, 4); 1611 if (ret) 1612 return ret; 1613 1614 /* FIXME(BDW): Address space and security selectors. */ 1615 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | 1616 (ppgtt<<8) | 1617 (dispatch_flags & I915_DISPATCH_RS ? 
1618 MI_BATCH_RESOURCE_STREAMER : 0)); 1619 intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); 1620 intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); 1621 intel_logical_ring_emit(ringbuf, MI_NOOP); 1622 intel_logical_ring_advance(ringbuf); 1623 1624 return 0; 1625 } 1626 1627 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1628 { 1629 struct drm_device *dev = engine->dev; 1630 struct drm_i915_private *dev_priv = dev->dev_private; 1631 unsigned long flags; 1632 1633 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1634 return false; 1635 1636 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1637 if (engine->irq_refcount++ == 0) { 1638 I915_WRITE_IMR(engine, 1639 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 1640 POSTING_READ(RING_IMR(engine->mmio_base)); 1641 } 1642 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1643 1644 return true; 1645 } 1646 1647 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1648 { 1649 struct drm_device *dev = engine->dev; 1650 struct drm_i915_private *dev_priv = dev->dev_private; 1651 unsigned long flags; 1652 1653 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1654 if (--engine->irq_refcount == 0) { 1655 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1656 POSTING_READ(RING_IMR(engine->mmio_base)); 1657 } 1658 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1659 } 1660 1661 static int gen8_emit_flush(struct drm_i915_gem_request *request, 1662 u32 invalidate_domains, 1663 u32 unused) 1664 { 1665 struct intel_ringbuffer *ringbuf = request->ringbuf; 1666 struct intel_engine_cs *engine = ringbuf->engine; 1667 struct drm_device *dev = engine->dev; 1668 struct drm_i915_private *dev_priv = dev->dev_private; 1669 uint32_t cmd; 1670 int ret; 1671 1672 ret = intel_ring_begin(request, 4); 1673 if (ret) 1674 return ret; 1675 1676 cmd = MI_FLUSH_DW + 1; 1677 1678 /* We always require a command barrier so that subsequent 1679 * commands, such as breadcrumb interrupts, are strictly ordered 1680 * wrt the contents of the write cache being flushed to memory 1681 * (and thus being coherent from the CPU). 
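 *
 * For example, an invalidating flush on the VCS engine ends up emitting
 * the four dwords below roughly as:
 *
 *   MI_FLUSH_DW + 1 | MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW |
 *           MI_INVALIDATE_TLB | MI_INVALIDATE_BSD
 *   I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT
 *   0   (upper dword of the post-sync address)
 *   0   (post-sync value)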
1682 */ 1683 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1684 1685 if (invalidate_domains & I915_GEM_GPU_DOMAINS) { 1686 cmd |= MI_INVALIDATE_TLB; 1687 if (engine == &dev_priv->engine[VCS]) 1688 cmd |= MI_INVALIDATE_BSD; 1689 } 1690 1691 intel_logical_ring_emit(ringbuf, cmd); 1692 intel_logical_ring_emit(ringbuf, 1693 I915_GEM_HWS_SCRATCH_ADDR | 1694 MI_FLUSH_DW_USE_GTT); 1695 intel_logical_ring_emit(ringbuf, 0); /* upper addr */ 1696 intel_logical_ring_emit(ringbuf, 0); /* value */ 1697 intel_logical_ring_advance(ringbuf); 1698 1699 return 0; 1700 } 1701 1702 static int gen8_emit_flush_render(struct drm_i915_gem_request *request, 1703 u32 invalidate_domains, 1704 u32 flush_domains) 1705 { 1706 struct intel_ringbuffer *ringbuf = request->ringbuf; 1707 struct intel_engine_cs *engine = ringbuf->engine; 1708 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 1709 bool vf_flush_wa = false, dc_flush_wa = false; 1710 u32 flags = 0; 1711 int ret; 1712 int len; 1713 1714 flags |= PIPE_CONTROL_CS_STALL; 1715 1716 if (flush_domains) { 1717 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1718 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1719 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 1720 flags |= PIPE_CONTROL_FLUSH_ENABLE; 1721 } 1722 1723 if (invalidate_domains) { 1724 flags |= PIPE_CONTROL_TLB_INVALIDATE; 1725 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 1726 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 1727 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 1728 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 1729 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 1730 flags |= PIPE_CONTROL_QW_WRITE; 1731 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 1732 1733 /* 1734 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1735 * pipe control. 
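		 *
		 * That NULL pipe control is the vf_flush_wa block emitted
		 * below: a 6-dword GFX_OP_PIPE_CONTROL with all fields
		 * zeroed, placed immediately before the PIPE_CONTROL that
		 * carries VF_CACHE_INVALIDATE.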
1736 */ 1737 if (IS_GEN9(engine->dev)) 1738 vf_flush_wa = true; 1739 1740 /* WaForGAMHang:kbl */ 1741 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) 1742 dc_flush_wa = true; 1743 } 1744 1745 len = 6; 1746 1747 if (vf_flush_wa) 1748 len += 6; 1749 1750 if (dc_flush_wa) 1751 len += 12; 1752 1753 ret = intel_ring_begin(request, len); 1754 if (ret) 1755 return ret; 1756 1757 if (vf_flush_wa) { 1758 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1759 intel_logical_ring_emit(ringbuf, 0); 1760 intel_logical_ring_emit(ringbuf, 0); 1761 intel_logical_ring_emit(ringbuf, 0); 1762 intel_logical_ring_emit(ringbuf, 0); 1763 intel_logical_ring_emit(ringbuf, 0); 1764 } 1765 1766 if (dc_flush_wa) { 1767 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1768 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); 1769 intel_logical_ring_emit(ringbuf, 0); 1770 intel_logical_ring_emit(ringbuf, 0); 1771 intel_logical_ring_emit(ringbuf, 0); 1772 intel_logical_ring_emit(ringbuf, 0); 1773 } 1774 1775 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1776 intel_logical_ring_emit(ringbuf, flags); 1777 intel_logical_ring_emit(ringbuf, scratch_addr); 1778 intel_logical_ring_emit(ringbuf, 0); 1779 intel_logical_ring_emit(ringbuf, 0); 1780 intel_logical_ring_emit(ringbuf, 0); 1781 1782 if (dc_flush_wa) { 1783 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1784 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); 1785 intel_logical_ring_emit(ringbuf, 0); 1786 intel_logical_ring_emit(ringbuf, 0); 1787 intel_logical_ring_emit(ringbuf, 0); 1788 intel_logical_ring_emit(ringbuf, 0); 1789 } 1790 1791 intel_logical_ring_advance(ringbuf); 1792 1793 return 0; 1794 } 1795 1796 static u32 gen8_get_seqno(struct intel_engine_cs *engine) 1797 { 1798 return intel_read_status_page(engine, I915_GEM_HWS_INDEX); 1799 } 1800 1801 static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1802 { 1803 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 1804 } 1805 1806 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) 1807 { 1808 /* 1809 * On BXT A steppings there is a HW coherency issue whereby the 1810 * MI_STORE_DATA_IMM storing the completed request's seqno 1811 * occasionally doesn't invalidate the CPU cache. Work around this by 1812 * clflushing the corresponding cacheline whenever the caller wants 1813 * the coherency to be guaranteed. Note that this cacheline is known 1814 * to be clean at this point, since we only write it in 1815 * bxt_a_set_seqno(), where we also do a clflush after the write. So 1816 * this clflush in practice becomes an invalidate operation. 1817 */ 1818 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1819 } 1820 1821 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1822 { 1823 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 1824 1825 /* See bxt_a_get_seqno() explaining the reason for the clflush. */ 1826 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1827 } 1828 1829 /* 1830 * Reserve space for 2 NOOPs at the end of each request to be 1831 * used as a workaround for not being allowed to do lite 1832 * restore with HEAD==TAIL (WaIdleLiteRestore). 
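 *
 * gen8_emit_request() and gen8_emit_request_render() below account for this
 * by requesting 6 + WA_TAIL_DWORDS and 8 + WA_TAIL_DWORDS dwords from
 * intel_ring_begin(), respectively.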
1833 */ 1834 #define WA_TAIL_DWORDS 2 1835 1836 static inline u32 hws_seqno_address(struct intel_engine_cs *engine) 1837 { 1838 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; 1839 } 1840 1841 static int gen8_emit_request(struct drm_i915_gem_request *request) 1842 { 1843 struct intel_ringbuffer *ringbuf = request->ringbuf; 1844 int ret; 1845 1846 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); 1847 if (ret) 1848 return ret; 1849 1850 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 1851 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); 1852 1853 intel_logical_ring_emit(ringbuf, 1854 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1855 intel_logical_ring_emit(ringbuf, 1856 hws_seqno_address(request->engine) | 1857 MI_FLUSH_DW_USE_GTT); 1858 intel_logical_ring_emit(ringbuf, 0); 1859 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1860 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1861 intel_logical_ring_emit(ringbuf, MI_NOOP); 1862 return intel_logical_ring_advance_and_submit(request); 1863 } 1864 1865 static int gen8_emit_request_render(struct drm_i915_gem_request *request) 1866 { 1867 struct intel_ringbuffer *ringbuf = request->ringbuf; 1868 int ret; 1869 1870 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); 1871 if (ret) 1872 return ret; 1873 1874 /* We're using qword write, seqno should be aligned to 8 bytes. */ 1875 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 1876 1877 /* w/a for post sync ops following a GPGPU operation we 1878 * need a prior CS_STALL, which is emitted by the flush 1879 * following the batch. 1880 */ 1881 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1882 intel_logical_ring_emit(ringbuf, 1883 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1884 PIPE_CONTROL_CS_STALL | 1885 PIPE_CONTROL_QW_WRITE)); 1886 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); 1887 intel_logical_ring_emit(ringbuf, 0); 1888 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1889 /* We're thrashing one dword of HWS. */ 1890 intel_logical_ring_emit(ringbuf, 0); 1891 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1892 intel_logical_ring_emit(ringbuf, MI_NOOP); 1893 return intel_logical_ring_advance_and_submit(request); 1894 } 1895 1896 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) 1897 { 1898 struct render_state so; 1899 int ret; 1900 1901 ret = i915_gem_render_state_prepare(req->engine, &so); 1902 if (ret) 1903 return ret; 1904 1905 if (so.rodata == NULL) 1906 return 0; 1907 1908 ret = req->engine->emit_bb_start(req, so.ggtt_offset, 1909 I915_DISPATCH_SECURE); 1910 if (ret) 1911 goto out; 1912 1913 ret = req->engine->emit_bb_start(req, 1914 (so.ggtt_offset + so.aux_batch_offset), 1915 I915_DISPATCH_SECURE); 1916 if (ret) 1917 goto out; 1918 1919 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req); 1920 1921 out: 1922 i915_gem_render_state_fini(&so); 1923 return ret; 1924 } 1925 1926 static int gen8_init_rcs_context(struct drm_i915_gem_request *req) 1927 { 1928 int ret; 1929 1930 ret = intel_logical_ring_workarounds_emit(req); 1931 if (ret) 1932 return ret; 1933 1934 ret = intel_rcs_context_init_mocs(req); 1935 /* 1936 * Failing to program the MOCS is non-fatal.The system will not 1937 * run at peak performance. So generate an error and carry on. 
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return intel_lr_context_render_state_init(req);
}

/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 *
 * @engine: Engine Command Streamer.
 *
 */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	/*
	 * Tasklet cannot be active at this point due to intel_mark_active/idle
	 * so this is just for documentation.
	 */
	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
		tasklet_kill(&engine->irq_tasklet);

	dev_priv = engine->dev->dev_private;

	if (engine->buffer) {
		intel_logical_ring_stop(engine);
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	i915_cmd_parser_fini_ring(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->status_page.obj) {
		i915_gem_object_unpin_map(engine->status_page.obj);
		engine->status_page.obj = NULL;
	}

	engine->idle_lite_restore_wa = 0;
	engine->disable_lite_restore_wa = false;
	engine->ctx_desc_template = 0;

	lrc_destroy_wa_ctx_obj(engine);
	engine->dev = NULL;
}

static void
logical_ring_default_vfuncs(struct drm_device *dev,
			    struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
	engine->init_hw = gen8_init_common_ring;
	engine->emit_request = gen8_emit_request;
	engine->emit_flush = gen8_emit_flush;
	engine->irq_get = gen8_logical_ring_get_irq;
	engine->irq_put = gen8_logical_ring_put_irq;
	engine->emit_bb_start = gen8_emit_bb_start;
	engine->get_seqno = gen8_get_seqno;
	engine->set_seqno = gen8_set_seqno;
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
		engine->set_seqno = bxt_a_set_seqno;
	}
}

static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}

static int
lrc_setup_hws(struct intel_engine_cs *engine,
	      struct drm_i915_gem_object *dctx_obj)
{
	char *hws;

	/* The HWSP is part of the default context object in LRC mode. */
	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
				       LRC_PPHWSP_PN * PAGE_SIZE;
	hws = i915_gem_object_pin_map(dctx_obj);
	if (IS_ERR(hws))
		return PTR_ERR(hws);
	engine->status_page.page_addr = (u32 *)(hws + LRC_PPHWSP_PN * PAGE_SIZE);
	engine->status_page.obj = dctx_obj;

	return 0;
}

static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_context *dctx = dev_priv->kernel_context;
	enum forcewake_domains fw_domains;
	int ret;

	/* Intentionally left blank.
*/ 2044 engine->buffer = NULL; 2045 2046 engine->dev = dev; 2047 INIT_LIST_HEAD(&engine->active_list); 2048 INIT_LIST_HEAD(&engine->request_list); 2049 i915_gem_batch_pool_init(dev, &engine->batch_pool); 2050 init_waitqueue_head(&engine->irq_queue); 2051 2052 INIT_LIST_HEAD(&engine->buffers); 2053 INIT_LIST_HEAD(&engine->execlist_queue); 2054 INIT_LIST_HEAD(&engine->execlist_retired_req_list); 2055 lockinit(&engine->execlist_lock, "i915el", 0, LK_CANRECURSE); 2056 2057 tasklet_init(&engine->irq_tasklet, 2058 intel_lrc_irq_handler, (unsigned long)engine); 2059 2060 logical_ring_init_platform_invariants(engine); 2061 2062 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, 2063 RING_ELSP(engine), 2064 FW_REG_WRITE); 2065 2066 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 2067 RING_CONTEXT_STATUS_PTR(engine), 2068 FW_REG_READ | FW_REG_WRITE); 2069 2070 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 2071 RING_CONTEXT_STATUS_BUF_BASE(engine), 2072 FW_REG_READ); 2073 2074 engine->fw_domains = fw_domains; 2075 2076 ret = i915_cmd_parser_init_ring(engine); 2077 if (ret) 2078 goto error; 2079 2080 ret = intel_lr_context_deferred_alloc(dctx, engine); 2081 if (ret) 2082 goto error; 2083 2084 /* As this is the default context, always pin it */ 2085 ret = intel_lr_context_do_pin(dctx, engine); 2086 if (ret) { 2087 DRM_ERROR( 2088 "Failed to pin and map ringbuffer %s: %d\n", 2089 engine->name, ret); 2090 goto error; 2091 } 2092 2093 /* And setup the hardware status page. */ 2094 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); 2095 if (ret) { 2096 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); 2097 goto error; 2098 } 2099 2100 return 0; 2101 2102 error: 2103 intel_logical_ring_cleanup(engine); 2104 return ret; 2105 } 2106 2107 static int logical_render_ring_init(struct drm_device *dev) 2108 { 2109 struct drm_i915_private *dev_priv = dev->dev_private; 2110 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 2111 int ret; 2112 2113 engine->name = "render ring"; 2114 engine->id = RCS; 2115 engine->exec_id = I915_EXEC_RENDER; 2116 engine->guc_id = GUC_RENDER_ENGINE; 2117 engine->mmio_base = RENDER_RING_BASE; 2118 2119 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT); 2120 if (HAS_L3_DPF(dev)) 2121 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2122 2123 logical_ring_default_vfuncs(dev, engine); 2124 2125 /* Override some for render ring. 
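	 * init_hw, init_context, cleanup, emit_flush and emit_request all
	 * get render-specific implementations here.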
*/ 2126 if (INTEL_INFO(dev)->gen >= 9) 2127 engine->init_hw = gen9_init_render_ring; 2128 else 2129 engine->init_hw = gen8_init_render_ring; 2130 engine->init_context = gen8_init_rcs_context; 2131 engine->cleanup = intel_fini_pipe_control; 2132 engine->emit_flush = gen8_emit_flush_render; 2133 engine->emit_request = gen8_emit_request_render; 2134 2135 engine->dev = dev; 2136 2137 ret = intel_init_pipe_control(engine); 2138 if (ret) 2139 return ret; 2140 2141 ret = intel_init_workaround_bb(engine); 2142 if (ret) { 2143 /* 2144 * We continue even if we fail to initialize WA batch 2145 * because we only expect rare glitches but nothing 2146 * critical to prevent us from using GPU 2147 */ 2148 DRM_ERROR("WA batch buffer initialization failed: %d\n", 2149 ret); 2150 } 2151 2152 ret = logical_ring_init(dev, engine); 2153 if (ret) { 2154 lrc_destroy_wa_ctx_obj(engine); 2155 } 2156 2157 return ret; 2158 } 2159 2160 static int logical_bsd_ring_init(struct drm_device *dev) 2161 { 2162 struct drm_i915_private *dev_priv = dev->dev_private; 2163 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2164 2165 engine->name = "bsd ring"; 2166 engine->id = VCS; 2167 engine->exec_id = I915_EXEC_BSD; 2168 engine->guc_id = GUC_VIDEO_ENGINE; 2169 engine->mmio_base = GEN6_BSD_RING_BASE; 2170 2171 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); 2172 logical_ring_default_vfuncs(dev, engine); 2173 2174 return logical_ring_init(dev, engine); 2175 } 2176 2177 static int logical_bsd2_ring_init(struct drm_device *dev) 2178 { 2179 struct drm_i915_private *dev_priv = dev->dev_private; 2180 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 2181 2182 engine->name = "bsd2 ring"; 2183 engine->id = VCS2; 2184 engine->exec_id = I915_EXEC_BSD; 2185 engine->guc_id = GUC_VIDEO_ENGINE2; 2186 engine->mmio_base = GEN8_BSD2_RING_BASE; 2187 2188 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); 2189 logical_ring_default_vfuncs(dev, engine); 2190 2191 return logical_ring_init(dev, engine); 2192 } 2193 2194 static int logical_blt_ring_init(struct drm_device *dev) 2195 { 2196 struct drm_i915_private *dev_priv = dev->dev_private; 2197 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 2198 2199 engine->name = "blitter ring"; 2200 engine->id = BCS; 2201 engine->exec_id = I915_EXEC_BLT; 2202 engine->guc_id = GUC_BLITTER_ENGINE; 2203 engine->mmio_base = BLT_RING_BASE; 2204 2205 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT); 2206 logical_ring_default_vfuncs(dev, engine); 2207 2208 return logical_ring_init(dev, engine); 2209 } 2210 2211 static int logical_vebox_ring_init(struct drm_device *dev) 2212 { 2213 struct drm_i915_private *dev_priv = dev->dev_private; 2214 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 2215 2216 engine->name = "video enhancement ring"; 2217 engine->id = VECS; 2218 engine->exec_id = I915_EXEC_VEBOX; 2219 engine->guc_id = GUC_VIDEOENHANCE_ENGINE; 2220 engine->mmio_base = VEBOX_RING_BASE; 2221 2222 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); 2223 logical_ring_default_vfuncs(dev, engine); 2224 2225 return logical_ring_init(dev, engine); 2226 } 2227 2228 /** 2229 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers 2230 * @dev: DRM device. 2231 * 2232 * This function inits the engines for an Execlists submission style (the equivalent in the 2233 * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for 2234 * those engines that are present in the hardware. 
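 *
 * The render engine is set up first; BSD, blitter, vebox and the second BSD
 * engine follow, each only when the hardware has it. If any step fails, the
 * engines that were already initialized are cleaned up again in reverse
 * order.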
2235 * 2236 * Return: non-zero if the initialization failed. 2237 */ 2238 int intel_logical_rings_init(struct drm_device *dev) 2239 { 2240 struct drm_i915_private *dev_priv = dev->dev_private; 2241 int ret; 2242 2243 ret = logical_render_ring_init(dev); 2244 if (ret) 2245 return ret; 2246 2247 if (HAS_BSD(dev)) { 2248 ret = logical_bsd_ring_init(dev); 2249 if (ret) 2250 goto cleanup_render_ring; 2251 } 2252 2253 if (HAS_BLT(dev)) { 2254 ret = logical_blt_ring_init(dev); 2255 if (ret) 2256 goto cleanup_bsd_ring; 2257 } 2258 2259 if (HAS_VEBOX(dev)) { 2260 ret = logical_vebox_ring_init(dev); 2261 if (ret) 2262 goto cleanup_blt_ring; 2263 } 2264 2265 if (HAS_BSD2(dev)) { 2266 ret = logical_bsd2_ring_init(dev); 2267 if (ret) 2268 goto cleanup_vebox_ring; 2269 } 2270 2271 return 0; 2272 2273 cleanup_vebox_ring: 2274 intel_logical_ring_cleanup(&dev_priv->engine[VECS]); 2275 cleanup_blt_ring: 2276 intel_logical_ring_cleanup(&dev_priv->engine[BCS]); 2277 cleanup_bsd_ring: 2278 intel_logical_ring_cleanup(&dev_priv->engine[VCS]); 2279 cleanup_render_ring: 2280 intel_logical_ring_cleanup(&dev_priv->engine[RCS]); 2281 2282 return ret; 2283 } 2284 2285 static u32 2286 make_rpcs(struct drm_device *dev) 2287 { 2288 u32 rpcs = 0; 2289 2290 /* 2291 * No explicit RPCS request is needed to ensure full 2292 * slice/subslice/EU enablement prior to Gen9. 2293 */ 2294 if (INTEL_INFO(dev)->gen < 9) 2295 return 0; 2296 2297 /* 2298 * Starting in Gen9, render power gating can leave 2299 * slice/subslice/EU in a partially enabled state. We 2300 * must make an explicit request through RPCS for full 2301 * enablement. 2302 */ 2303 if (INTEL_INFO(dev)->has_slice_pg) { 2304 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 2305 rpcs |= INTEL_INFO(dev)->slice_total << 2306 GEN8_RPCS_S_CNT_SHIFT; 2307 rpcs |= GEN8_RPCS_ENABLE; 2308 } 2309 2310 if (INTEL_INFO(dev)->has_subslice_pg) { 2311 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 2312 rpcs |= INTEL_INFO(dev)->subslice_per_slice << 2313 GEN8_RPCS_SS_CNT_SHIFT; 2314 rpcs |= GEN8_RPCS_ENABLE; 2315 } 2316 2317 if (INTEL_INFO(dev)->has_eu_pg) { 2318 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2319 GEN8_RPCS_EU_MIN_SHIFT; 2320 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2321 GEN8_RPCS_EU_MAX_SHIFT; 2322 rpcs |= GEN8_RPCS_ENABLE; 2323 } 2324 2325 return rpcs; 2326 } 2327 2328 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) 2329 { 2330 u32 indirect_ctx_offset; 2331 2332 switch (INTEL_INFO(engine->dev)->gen) { 2333 default: 2334 MISSING_CASE(INTEL_INFO(engine->dev)->gen); 2335 /* fall through */ 2336 case 9: 2337 indirect_ctx_offset = 2338 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2339 break; 2340 case 8: 2341 indirect_ctx_offset = 2342 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2343 break; 2344 } 2345 2346 return indirect_ctx_offset; 2347 } 2348 2349 static int 2350 populate_lr_context(struct intel_context *ctx, 2351 struct drm_i915_gem_object *ctx_obj, 2352 struct intel_engine_cs *engine, 2353 struct intel_ringbuffer *ringbuf) 2354 { 2355 struct drm_device *dev = engine->dev; 2356 struct drm_i915_private *dev_priv = dev->dev_private; 2357 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2358 char *vaddr; 2359 u32 *reg_state; 2360 int ret; 2361 2362 if (!ppgtt) 2363 ppgtt = dev_priv->mm.aliasing_ppgtt; 2364 2365 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); 2366 if (ret) { 2367 DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); 2368 return ret; 2369 } 2370 2371 vaddr = i915_gem_object_pin_map(ctx_obj); 2372 if (IS_ERR(vaddr)) { 2373 ret = PTR_ERR(vaddr); 2374 
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); 2375 return ret; 2376 } 2377 ctx_obj->dirty = true; 2378 2379 /* The second page of the context object contains some fields which must 2380 * be set up prior to the first execution. */ 2381 reg_state = (u32 *)(vaddr + LRC_STATE_PN * PAGE_SIZE); 2382 2383 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM 2384 * commands followed by (reg, value) pairs. The values we are setting here are 2385 * only for the first context restore: on a subsequent save, the GPU will 2386 * recreate this batchbuffer with new values (including all the missing 2387 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 2388 reg_state[CTX_LRI_HEADER_0] = 2389 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; 2390 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, 2391 RING_CONTEXT_CONTROL(engine), 2392 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2393 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2394 (HAS_RESOURCE_STREAMER(dev) ? 2395 CTX_CTRL_RS_CTX_ENABLE : 0))); 2396 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 2397 0); 2398 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 2399 0); 2400 /* Ring buffer start address is not known until the buffer is pinned. 2401 * It is written to the context image in execlists_update_context() 2402 */ 2403 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, 2404 RING_START(engine->mmio_base), 0); 2405 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, 2406 RING_CTL(engine->mmio_base), 2407 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); 2408 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, 2409 RING_BBADDR_UDW(engine->mmio_base), 0); 2410 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, 2411 RING_BBADDR(engine->mmio_base), 0); 2412 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, 2413 RING_BBSTATE(engine->mmio_base), 2414 RING_BB_PPGTT); 2415 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, 2416 RING_SBBADDR_UDW(engine->mmio_base), 0); 2417 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, 2418 RING_SBBADDR(engine->mmio_base), 0); 2419 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, 2420 RING_SBBSTATE(engine->mmio_base), 0); 2421 if (engine->id == RCS) { 2422 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, 2423 RING_BB_PER_CTX_PTR(engine->mmio_base), 0); 2424 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, 2425 RING_INDIRECT_CTX(engine->mmio_base), 0); 2426 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, 2427 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0); 2428 if (engine->wa_ctx.obj) { 2429 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 2430 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); 2431 2432 reg_state[CTX_RCS_INDIRECT_CTX+1] = 2433 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) | 2434 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); 2435 2436 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 2437 intel_lr_indirect_ctx_offset(engine) << 6; 2438 2439 reg_state[CTX_BB_PER_CTX_PTR+1] = 2440 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | 2441 0x01; 2442 } 2443 } 2444 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; 2445 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, 2446 RING_CTX_TIMESTAMP(engine->mmio_base), 0); 2447 /* PDP values well be assigned later if needed */ 2448 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 2449 0); 2450 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 2451 0); 2452 
ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 2453 0); 2454 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 2455 0); 2456 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 2457 0); 2458 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 2459 0); 2460 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 2461 0); 2462 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 2463 0); 2464 2465 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 2466 /* 64b PPGTT (48bit canonical) 2467 * PDP0_DESCRIPTOR contains the base address to PML4 and 2468 * other PDP Descriptors are ignored. 2469 */ 2470 ASSIGN_CTX_PML4(ppgtt, reg_state); 2471 } else { 2472 /* 32b PPGTT 2473 * PDP*_DESCRIPTOR contains the base address of space supported. 2474 * With dynamic page allocation, PDPs may not be allocated at 2475 * this point. Point the unallocated PDPs to the scratch page 2476 */ 2477 execlists_update_context_pdps(ppgtt, reg_state); 2478 } 2479 2480 if (engine->id == RCS) { 2481 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2482 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2483 make_rpcs(dev)); 2484 } 2485 2486 i915_gem_object_unpin_map(ctx_obj); 2487 2488 return 0; 2489 } 2490 2491 /** 2492 * intel_lr_context_free() - free the LRC specific bits of a context 2493 * @ctx: the LR context to free. 2494 * 2495 * The real context freeing is done in i915_gem_context_free: this only 2496 * takes care of the bits that are LRC related: the per-engine backing 2497 * objects and the logical ringbuffer. 2498 */ 2499 void intel_lr_context_free(struct intel_context *ctx) 2500 { 2501 int i; 2502 2503 for (i = I915_NUM_ENGINES; --i >= 0; ) { 2504 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; 2505 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; 2506 2507 if (!ctx_obj) 2508 continue; 2509 2510 if (ctx == ctx->i915->kernel_context) { 2511 intel_unpin_ringbuffer_obj(ringbuf); 2512 i915_gem_object_ggtt_unpin(ctx_obj); 2513 i915_gem_object_unpin_map(ctx_obj); 2514 } 2515 2516 WARN_ON(ctx->engine[i].pin_count); 2517 intel_ringbuffer_free(ringbuf); 2518 drm_gem_object_unreference(&ctx_obj->base); 2519 } 2520 } 2521 2522 /** 2523 * intel_lr_context_size() - return the size of the context for an engine 2524 * @ring: which engine to find the context size for 2525 * 2526 * Each engine may require a different amount of space for a context image, 2527 * so when allocating (or copying) an image, this function can be used to 2528 * find the right size for the specific engine. 2529 * 2530 * Return: size (in bytes) of an engine-specific context image 2531 * 2532 * Note: this size includes the HWSP, which is part of the context image 2533 * in LRC mode, but does not include the "shared data page" used with 2534 * GuC submission. The caller should account for this if using the GuC. 
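 *
 * intel_lr_context_deferred_alloc() below, for example, rounds this value up
 * to a page boundary and then adds LRC_PPHWSP_PN extra page(s) on top before
 * allocating the context backing object.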
2535 */ 2536 uint32_t intel_lr_context_size(struct intel_engine_cs *engine) 2537 { 2538 int ret = 0; 2539 2540 WARN_ON(INTEL_INFO(engine->dev)->gen < 8); 2541 2542 switch (engine->id) { 2543 case RCS: 2544 if (INTEL_INFO(engine->dev)->gen >= 9) 2545 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2546 else 2547 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2548 break; 2549 case VCS: 2550 case BCS: 2551 case VECS: 2552 case VCS2: 2553 ret = GEN8_LR_CONTEXT_OTHER_SIZE; 2554 break; 2555 } 2556 2557 return ret; 2558 } 2559 2560 /** 2561 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context 2562 * @ctx: LR context to create. 2563 * @ring: engine to be used with the context. 2564 * 2565 * This function can be called more than once, with different engines, if we plan 2566 * to use the context with them. The context backing objects and the ringbuffers 2567 * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why 2568 * the creation is a deferred call: it's better to make sure first that we need to use 2569 * a given ring with the context. 2570 * 2571 * Return: non-zero on error. 2572 */ 2573 2574 int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2575 struct intel_engine_cs *engine) 2576 { 2577 struct drm_device *dev = engine->dev; 2578 struct drm_i915_gem_object *ctx_obj; 2579 uint32_t context_size; 2580 struct intel_ringbuffer *ringbuf; 2581 int ret; 2582 2583 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2584 WARN_ON(ctx->engine[engine->id].state); 2585 2586 context_size = round_up(intel_lr_context_size(engine), 4096); 2587 2588 /* One extra page as the sharing data between driver and GuC */ 2589 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2590 2591 ctx_obj = i915_gem_alloc_object(dev, context_size); 2592 if (!ctx_obj) { 2593 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2594 return -ENOMEM; 2595 } 2596 2597 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); 2598 if (IS_ERR(ringbuf)) { 2599 ret = PTR_ERR(ringbuf); 2600 goto error_deref_obj; 2601 } 2602 2603 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf); 2604 if (ret) { 2605 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2606 goto error_ringbuf; 2607 } 2608 2609 ctx->engine[engine->id].ringbuf = ringbuf; 2610 ctx->engine[engine->id].state = ctx_obj; 2611 2612 if (ctx != ctx->i915->kernel_context && engine->init_context) { 2613 struct drm_i915_gem_request *req; 2614 2615 req = i915_gem_request_alloc(engine, ctx); 2616 if (IS_ERR(req)) { 2617 ret = PTR_ERR(req); 2618 DRM_ERROR("ring create req: %d\n", ret); 2619 goto error_ringbuf; 2620 } 2621 2622 ret = engine->init_context(req); 2623 i915_add_request_no_flush(req); 2624 if (ret) { 2625 DRM_ERROR("ring init context: %d\n", 2626 ret); 2627 goto error_ringbuf; 2628 } 2629 } 2630 return 0; 2631 2632 error_ringbuf: 2633 intel_ringbuffer_free(ringbuf); 2634 error_deref_obj: 2635 drm_gem_object_unreference(&ctx_obj->base); 2636 ctx->engine[engine->id].ringbuf = NULL; 2637 ctx->engine[engine->id].state = NULL; 2638 return ret; 2639 } 2640 2641 void intel_lr_context_reset(struct drm_i915_private *dev_priv, 2642 struct intel_context *ctx) 2643 { 2644 struct intel_engine_cs *engine; 2645 2646 for_each_engine(engine, dev_priv) { 2647 struct drm_i915_gem_object *ctx_obj = 2648 ctx->engine[engine->id].state; 2649 struct intel_ringbuffer *ringbuf = 2650 ctx->engine[engine->id].ringbuf; 2651 char *vaddr; 2652 uint32_t *reg_state; 2653 2654 if (!ctx_obj) 2655 continue; 2656 2657 vaddr = i915_gem_object_pin_map(ctx_obj); 2658 if 
(WARN_ON(IS_ERR(vaddr))) 2659 continue; 2660 2661 reg_state = (uint32_t *)(vaddr + LRC_STATE_PN * PAGE_SIZE); 2662 ctx_obj->dirty = true; 2663 2664 reg_state[CTX_RING_HEAD+1] = 0; 2665 reg_state[CTX_RING_TAIL+1] = 0; 2666 2667 i915_gem_object_unpin_map(ctx_obj); 2668 2669 ringbuf->head = 0; 2670 ringbuf->tail = 0; 2671 } 2672 } 2673