/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

static const struct engine_info {
	const char *name;
	unsigned exec_id;
	enum intel_engine_hw_id hw_id;
	u32 mmio_base;
	unsigned irq_shift;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.hw_id = RCS_HW,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.hw_id = BCS_HW,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS_HW,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS2_HW,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd2_ring_buffer,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.hw_id = VECS_HW,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;

	dev_priv->engine[id] = engine;
	return 0;
}
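/*
 * Illustrative sketch (not compiled): roughly how a caller pairs
 * intel_engine_setup() with the submission-mode specific init vfunc for a
 * single engine. intel_engines_init() below does the same in a loop over
 * the intel_engines[] table; this hypothetical helper only shows the shape
 * of one iteration.
 */
#if 0
static int example_setup_render(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_engine_setup(dev_priv, RCS);
	if (ret)
		return ret;

	/* Pick legacy or execlists init, as intel_engines_init() does. */
	if (i915.enable_execlists)
		ret = intel_engines[RCS].init_execlists(dev_priv->engine[RCS]);
	else
		ret = intel_engines[RCS].init_legacy(dev_priv->engine[RCS]);

	return ret;
}
#endif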
/**
 * intel_engines_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	unsigned int mask = 0;
	int (*init)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;
	int ret;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		if (i915.enable_execlists)
			init = intel_engines[i].init_execlists;
		else
			init = intel_engines[i].init_legacy;

		if (!init)
			continue;

		ret = intel_engine_setup(dev_priv, i);
		if (ret)
			goto cleanup;

		ret = init(dev_priv->engine[i]);
		if (ret)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (i915.enable_execlists)
			intel_logical_ring_cleanup(engine);
		else
			intel_engine_cleanup(engine);
	}

	return ret;
}

void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	engine->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}
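/*
 * Illustrative sketch (not compiled): after a GPU reset the driver would
 * walk every engine and rewind its seqno, e.g. back to the last completed
 * value. This hypothetical loop only shows the shape of such a caller; the
 * value chosen for @seqno is up to the reset path.
 */
#if 0
static void example_reset_seqnos(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		intel_engine_init_seqno(engine, seqno);
}
#endif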
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

static void intel_engine_init_requests(struct intel_engine_cs *engine)
{
	init_request_active(&engine->last_request, NULL);
	INIT_LIST_HEAD(&engine->request_list);
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->execlist_queue);
	lockinit(&engine->execlist_lock, "i915el", 0, 0);

	engine->fence_context = dma_fence_context_alloc(1);

	intel_engine_init_requests(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&engine->i915->drm, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		return ret;

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);
}
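/*
 * Illustrative sketch (not compiled): the common helpers above are meant
 * to bracket the submission-mode specific setup, roughly in this order.
 * This hypothetical function only shows the intended pairing of setup,
 * init and cleanup; the elided step stands in for the legacy/execlists
 * specific code.
 */
#if 0
static int example_engine_lifecycle(struct intel_engine_cs *engine)
{
	int ret;

	intel_engine_setup_common(engine);	/* no hw access */

	ret = intel_engine_init_common(engine);	/* may touch hw */
	if (ret)
		return ret;

	/* ... submission-mode specific init, request handling, etc ... */

	intel_engine_cleanup_common(engine);	/* teardown on the way out */
	return 0;
}
#endif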
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
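/*
 * Illustrative sketch (not compiled): hangcheck-style users sample ACTHD
 * twice and compare; a head that has not moved suggests the engine has
 * made no progress. engine->hangcheck.acthd is assumed here to hold the
 * previous sample; this hypothetical helper is not the driver's actual
 * hangcheck logic.
 */
#if 0
static bool example_engine_stuck(struct intel_engine_cs *engine)
{
	u64 acthd = intel_engine_get_active_head(engine);
	bool stuck = (acthd == engine->hangcheck.acthd);

	engine->hangcheck.acthd = acthd;
	return stuck;
}
#endif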
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
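/*
 * Illustrative sketch (not compiled): a hypothetical debug dump of the
 * snapshot taken by intel_engine_get_instdone(), using the same
 * slice/subslice iterator as the function above.
 */
#if 0
static void example_print_instdone(struct drm_i915_private *dev_priv,
				   const struct intel_instdone *instdone)
{
	int slice, subslice;

	DRM_DEBUG_DRIVER("INSTDONE: 0x%08x\n", instdone->instdone);
	DRM_DEBUG_DRIVER("SC_INSTDONE: 0x%08x\n", instdone->slice_common);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		DRM_DEBUG_DRIVER("SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
				 slice, subslice,
				 instdone->sampler[slice][subslice]);
}
#endif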