/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 28 */ 29 30 #ifdef __DragonFly__ 31 #include "opt_drm.h" /* for VGA_SWITCHEROO */ 32 #endif 33 34 #include <linux/acpi.h> 35 #include <linux/device.h> 36 #include <linux/oom.h> 37 #include <linux/module.h> 38 #include <linux/pci.h> 39 #include <linux/pm.h> 40 #include <linux/pm_runtime.h> 41 #include <linux/pnp.h> 42 #include <linux/slab.h> 43 #include <linux/vgaarb.h> 44 #include <linux/vga_switcheroo.h> 45 #include <linux/vt.h> 46 #include <acpi/video.h> 47 48 #include <drm/drmP.h> 49 #include <drm/drm_crtc_helper.h> 50 #include <drm/drm_atomic_helper.h> 51 #include <drm/i915_drm.h> 52 53 #include "i915_drv.h" 54 #include "i915_trace.h" 55 #include "i915_vgpu.h" 56 #include "intel_drv.h" 57 #include "intel_uc.h" 58 59 static struct drm_driver driver; 60 61 static unsigned int i915_load_fail_count; 62 63 bool __i915_inject_load_failure(const char *func, int line) 64 { 65 if (i915_load_fail_count >= i915.inject_load_failure) 66 return false; 67 68 if (++i915_load_fail_count == i915.inject_load_failure) { 69 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", 70 i915.inject_load_failure, func, line); 71 return true; 72 } 73 74 return false; 75 } 76 77 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" 78 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ 79 "providing the dmesg log by booting with drm.debug=0xf" 80 81 void 82 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 83 const char *fmt, ...) 
84 { 85 static bool shown_bug_once; 86 struct device *kdev = dev_priv->drm.dev; 87 bool is_error = level[1] <= KERN_ERR[1]; 88 bool is_debug = level[1] == KERN_DEBUG[1]; 89 struct va_format vaf; 90 va_list args; 91 92 if (is_debug && !(drm_debug & DRM_UT_DRIVER)) 93 return; 94 95 va_start(args, fmt); 96 97 vaf.fmt = fmt; 98 vaf.va = &args; 99 100 dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", 101 __builtin_return_address(0), &vaf); 102 103 if (is_error && !shown_bug_once) { 104 dev_notice(kdev, "%s", FDO_BUG_MSG); 105 shown_bug_once = true; 106 } 107 108 va_end(args); 109 } 110 111 static bool i915_error_injected(struct drm_i915_private *dev_priv) 112 { 113 return i915.inject_load_failure && 114 i915_load_fail_count == i915.inject_load_failure; 115 } 116 117 #define i915_load_error(dev_priv, fmt, ...) \ 118 __i915_printk(dev_priv, \ 119 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ 120 fmt, ##__VA_ARGS__) 121 122 123 static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) 124 { 125 enum intel_pch ret = PCH_NOP; 126 127 /* 128 * In a virtualized passthrough environment we can be in a 129 * setup where the ISA bridge is not able to be passed through. 130 * In this case, a south bridge can be emulated and we have to 131 * make an educated guess as to which PCH is really there. 
132 */ 133 134 if (IS_GEN5(dev_priv)) { 135 ret = PCH_IBX; 136 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 137 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { 138 ret = PCH_CPT; 139 DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); 140 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 141 ret = PCH_LPT; 142 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 143 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 144 ret = PCH_SPT; 145 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 146 } 147 148 return ret; 149 } 150 151 static void intel_detect_pch(struct drm_i915_private *dev_priv) 152 { 153 device_t pch = NULL; 154 struct pci_devinfo *di = NULL; 155 156 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 157 * (which really amounts to a PCH but no South Display). 158 */ 159 if (INTEL_INFO(dev_priv)->num_pipes == 0) { 160 dev_priv->pch_type = PCH_NOP; 161 return; 162 } 163 164 /* XXX The ISA bridge probe causes some old Core2 machines to hang */ 165 if (INTEL_INFO(dev_priv)->gen < 5) 166 return; 167 168 /* 169 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 170 * make graphics device passthrough work easy for VMM, that only 171 * need to expose ISA bridge to let driver know the real hardware 172 * underneath. This is a requirement from virtualization team. 173 * 174 * In some virtualized environments (e.g. XEN), there is irrelevant 175 * ISA bridge in the system. To work reliably, we should scan trhough 176 * all the ISA bridge devices and check for the first match, instead 177 * of only checking the first one. 
178 */ 179 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) { 180 if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) { 181 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 182 dev_priv->pch_id = id; 183 184 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 185 dev_priv->pch_type = PCH_IBX; 186 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 187 WARN_ON(!IS_GEN5(dev_priv)); 188 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 189 dev_priv->pch_type = PCH_CPT; 190 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 191 WARN_ON(!(IS_GEN6(dev_priv) || 192 IS_IVYBRIDGE(dev_priv))); 193 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 194 /* PantherPoint is CPT compatible */ 195 dev_priv->pch_type = PCH_CPT; 196 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 197 WARN_ON(!(IS_GEN6(dev_priv) || 198 IS_IVYBRIDGE(dev_priv))); 199 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 200 dev_priv->pch_type = PCH_LPT; 201 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 202 WARN_ON(!IS_HASWELL(dev_priv) && 203 !IS_BROADWELL(dev_priv)); 204 WARN_ON(IS_HSW_ULT(dev_priv) || 205 IS_BDW_ULT(dev_priv)); 206 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 207 dev_priv->pch_type = PCH_LPT; 208 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 209 WARN_ON(!IS_HASWELL(dev_priv) && 210 !IS_BROADWELL(dev_priv)); 211 WARN_ON(!IS_HSW_ULT(dev_priv) && 212 !IS_BDW_ULT(dev_priv)); 213 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 214 dev_priv->pch_type = PCH_SPT; 215 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 216 WARN_ON(!IS_SKYLAKE(dev_priv) && 217 !IS_KABYLAKE(dev_priv)); 218 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 219 dev_priv->pch_type = PCH_SPT; 220 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 221 WARN_ON(!IS_SKYLAKE(dev_priv) && 222 !IS_KABYLAKE(dev_priv)); 223 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 224 dev_priv->pch_type = PCH_KBP; 225 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 226 WARN_ON(!IS_SKYLAKE(dev_priv) && 227 !IS_KABYLAKE(dev_priv)); 228 } else if ((id == 
INTEL_PCH_P2X_DEVICE_ID_TYPE) || 229 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 230 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 231 1)) { 232 dev_priv->pch_type = 233 intel_virt_detect_pch(dev_priv); 234 } else 235 continue; 236 237 break; 238 } 239 } 240 if (!pch) 241 DRM_DEBUG_KMS("No PCH found.\n"); 242 243 #if 0 244 pci_dev_put(pch); 245 #endif 246 } 247 248 static int i915_getparam(struct drm_device *dev, void *data, 249 struct drm_file *file_priv) 250 { 251 struct drm_i915_private *dev_priv = to_i915(dev); 252 struct pci_dev *pdev = dev_priv->drm.pdev; 253 drm_i915_getparam_t *param = data; 254 int value; 255 256 switch (param->param) { 257 case I915_PARAM_IRQ_ACTIVE: 258 case I915_PARAM_ALLOW_BATCHBUFFER: 259 case I915_PARAM_LAST_DISPATCH: 260 case I915_PARAM_HAS_EXEC_CONSTANTS: 261 /* Reject all old ums/dri params. */ 262 return -ENODEV; 263 case I915_PARAM_CHIPSET_ID: 264 value = pdev->device; 265 break; 266 case I915_PARAM_REVISION: 267 value = pdev->revision; 268 break; 269 case I915_PARAM_NUM_FENCES_AVAIL: 270 value = dev_priv->num_fence_regs; 271 break; 272 case I915_PARAM_HAS_OVERLAY: 273 value = dev_priv->overlay ? 
1 : 0; 274 break; 275 case I915_PARAM_HAS_BSD: 276 value = !!dev_priv->engine[VCS]; 277 break; 278 case I915_PARAM_HAS_BLT: 279 value = !!dev_priv->engine[BCS]; 280 break; 281 case I915_PARAM_HAS_VEBOX: 282 value = !!dev_priv->engine[VECS]; 283 break; 284 case I915_PARAM_HAS_BSD2: 285 value = !!dev_priv->engine[VCS2]; 286 break; 287 case I915_PARAM_HAS_LLC: 288 value = HAS_LLC(dev_priv); 289 break; 290 case I915_PARAM_HAS_WT: 291 value = HAS_WT(dev_priv); 292 break; 293 case I915_PARAM_HAS_ALIASING_PPGTT: 294 value = USES_PPGTT(dev_priv); 295 break; 296 case I915_PARAM_HAS_SEMAPHORES: 297 value = i915.semaphores; 298 break; 299 #if 0 300 case I915_PARAM_HAS_SECURE_BATCHES: 301 value = capable(CAP_SYS_ADMIN); 302 break; 303 #endif 304 case I915_PARAM_CMD_PARSER_VERSION: 305 value = i915_cmd_parser_get_version(dev_priv); 306 break; 307 case I915_PARAM_SUBSLICE_TOTAL: 308 value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu); 309 if (!value) 310 return -ENODEV; 311 break; 312 case I915_PARAM_EU_TOTAL: 313 value = INTEL_INFO(dev_priv)->sseu.eu_total; 314 if (!value) 315 return -ENODEV; 316 break; 317 case I915_PARAM_HAS_GPU_RESET: 318 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); 319 break; 320 case I915_PARAM_HAS_RESOURCE_STREAMER: 321 value = HAS_RESOURCE_STREAMER(dev_priv); 322 break; 323 case I915_PARAM_HAS_POOLED_EU: 324 value = HAS_POOLED_EU(dev_priv); 325 break; 326 case I915_PARAM_MIN_EU_IN_POOL: 327 value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; 328 break; 329 case I915_PARAM_HUC_STATUS: 330 intel_runtime_pm_get(dev_priv); 331 value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; 332 intel_runtime_pm_put(dev_priv); 333 break; 334 case I915_PARAM_MMAP_GTT_VERSION: 335 /* Though we've started our numbering from 1, and so class all 336 * earlier versions as 0, in effect their value is undefined as 337 * the ioctl will report EINVAL for the unknown param! 
338 */ 339 value = i915_gem_mmap_gtt_version(); 340 break; 341 case I915_PARAM_HAS_SCHEDULER: 342 value = dev_priv->engine[RCS] && 343 dev_priv->engine[RCS]->schedule; 344 break; 345 #if 0 346 case I915_PARAM_MMAP_VERSION: 347 /* Remember to bump this if the version changes! */ 348 #endif 349 case I915_PARAM_HAS_GEM: 350 case I915_PARAM_HAS_PAGEFLIPPING: 351 case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */ 352 case I915_PARAM_HAS_RELAXED_FENCING: 353 case I915_PARAM_HAS_COHERENT_RINGS: 354 case I915_PARAM_HAS_RELAXED_DELTA: 355 case I915_PARAM_HAS_GEN7_SOL_RESET: 356 case I915_PARAM_HAS_WAIT_TIMEOUT: 357 #if 0 358 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 359 #endif 360 case I915_PARAM_HAS_PINNED_BATCHES: 361 case I915_PARAM_HAS_EXEC_NO_RELOC: 362 case I915_PARAM_HAS_EXEC_HANDLE_LUT: 363 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 364 case I915_PARAM_HAS_EXEC_SOFTPIN: 365 case I915_PARAM_HAS_EXEC_ASYNC: 366 case I915_PARAM_HAS_EXEC_FENCE: 367 /* For the time being all of these are always true; 368 * if some supported hardware does not have one of these 369 * features this value needs to be provided from 370 * INTEL_INFO(), a feature macro, or similar. 371 */ 372 value = 1; 373 break; 374 default: 375 DRM_DEBUG("Unknown parameter %d\n", param->param); 376 return -EINVAL; 377 } 378 379 if (put_user(value, param->value)) 380 return -EFAULT; 381 382 return 0; 383 } 384 385 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) 386 { 387 static struct pci_dev i915_bridge_dev; 388 389 i915_bridge_dev.dev.bsddev = pci_find_dbsf(0, 0, 0, 0); 390 if (!i915_bridge_dev.dev.bsddev) { 391 DRM_ERROR("bridge device not found\n"); 392 return -1; 393 } 394 395 dev_priv->bridge_dev = &i915_bridge_dev; 396 return 0; 397 } 398 399 /* Allocate space for the MCH regs if needed, return nonzero on error */ 400 static int 401 intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) 402 { 403 int reg = INTEL_GEN(dev_priv) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 404 u32 temp_lo, temp_hi = 0; 405 u64 mchbar_addr; 406 device_t bsddev, vga; 407 408 if (INTEL_GEN(dev_priv) >= 4) 409 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 410 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 411 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 412 413 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 414 #ifdef CONFIG_PNP 415 if (mchbar_addr && 416 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 417 return 0; 418 #endif 419 420 /* Get some space for it */ 421 bsddev = dev_priv->bridge_dev->dev.bsddev; 422 vga = device_get_parent(bsddev); 423 dev_priv->mch_res_rid = 0x100; 424 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), 425 bsddev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL, 426 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1); 427 if (dev_priv->mch_res == NULL) { 428 DRM_ERROR("failed mchbar resource alloc\n"); 429 return (-ENOMEM); 430 } 431 432 if (INTEL_GEN(dev_priv) >= 4) 433 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 434 upper_32_bits(rman_get_start(dev_priv->mch_res))); 435 436 pci_write_config_dword(dev_priv->bridge_dev, reg, 437 lower_32_bits(rman_get_start(dev_priv->mch_res))); 438 return 0; 439 } 440 441 /* Setup MCHBAR if possible, return true if we should disable it again */ 442 static void 443 intel_setup_mchbar(struct drm_i915_private *dev_priv) 444 { 445 int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 446 u32 temp; 447 bool enabled; 448 449 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 450 return; 451 452 dev_priv->mchbar_need_disable = false; 453 454 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 455 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); 456 enabled = !!(temp & DEVEN_MCHBAR_EN); 457 } else { 458 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 459 enabled = temp & 1; 460 } 461 462 /* If it's already enabled, don't have to do anything */ 463 if (enabled) 464 return; 465 466 if (intel_alloc_mchbar_resource(dev_priv)) 467 return; 468 469 dev_priv->mchbar_need_disable = true; 470 471 /* Space is allocated or reserved, so enable it. */ 472 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 473 pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 474 temp | DEVEN_MCHBAR_EN); 475 } else { 476 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 477 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 478 } 479 } 480 481 static void 482 intel_teardown_mchbar(struct drm_i915_private *dev_priv) 483 { 484 int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 485 device_t bsddev, vga; 486 487 if (dev_priv->mchbar_need_disable) { 488 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 489 u32 deven_val; 490 491 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, 492 &deven_val); 493 deven_val &= ~DEVEN_MCHBAR_EN; 494 pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 495 deven_val); 496 } else { 497 u32 mchbar_val; 498 499 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, 500 &mchbar_val); 501 mchbar_val &= ~1; 502 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, 503 mchbar_val); 504 } 505 } 506 507 bsddev = dev_priv->bridge_dev->dev.bsddev; 508 if (dev_priv->mch_res != NULL) { 509 vga = device_get_parent(bsddev); 510 BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), bsddev, 511 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 512 BUS_RELEASE_RESOURCE(device_get_parent(vga), bsddev, 513 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 514 dev_priv->mch_res = NULL; 515 } 516 } 517 518 #if 0 519 /* true = enable decode, false = disable decoder */ 520 static unsigned int i915_vga_set_decode(void *cookie, bool state) 521 { 522 struct drm_i915_private *dev_priv = cookie; 523 524 intel_modeset_vga_set_state(dev_priv, state); 525 if (state) 526 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 527 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 528 else 529 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 530 } 531 532 static int i915_resume_switcheroo(struct drm_device *dev); 533 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 534 535 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 536 { 537 struct drm_device *dev = pci_get_drvdata(pdev); 538 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 539 540 if (state == VGA_SWITCHEROO_ON) { 541 pr_info("switched on\n"); 542 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 543 /* i915 resume handler doesn't set to D0 */ 544 pci_set_power_state(pdev, 
PCI_D0); 545 i915_resume_switcheroo(dev); 546 dev->switch_power_state = DRM_SWITCH_POWER_ON; 547 } else { 548 pr_info("switched off\n"); 549 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 550 i915_suspend_switcheroo(dev, pmm); 551 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 552 } 553 } 554 555 static bool i915_switcheroo_can_switch(struct pci_dev *pdev) 556 { 557 struct drm_device *dev = pci_get_drvdata(pdev); 558 559 /* 560 * FIXME: open_count is protected by drm_global_mutex but that would lead to 561 * locking inversion with the driver load path. And the access here is 562 * completely racy anyway. So don't bother with locking for now. 563 */ 564 return dev->open_count == 0; 565 } 566 567 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { 568 .set_gpu_state = i915_switcheroo_set_state, 569 .reprobe = NULL, 570 .can_switch = i915_switcheroo_can_switch, 571 }; 572 #endif 573 574 static void i915_gem_fini(struct drm_i915_private *dev_priv) 575 { 576 mutex_lock(&dev_priv->drm.struct_mutex); 577 intel_uc_fini_hw(dev_priv); 578 i915_gem_cleanup_engines(dev_priv); 579 i915_gem_context_fini(dev_priv); 580 mutex_unlock(&dev_priv->drm.struct_mutex); 581 582 i915_gem_drain_freed_objects(dev_priv); 583 584 WARN_ON(!list_empty(&dev_priv->context_list)); 585 } 586 587 static int i915_load_modeset_init(struct drm_device *dev) 588 { 589 struct drm_i915_private *dev_priv = to_i915(dev); 590 int ret; 591 592 if (i915_inject_load_failure()) 593 return -ENODEV; 594 595 intel_bios_init(dev_priv); 596 597 /* If we have > 1 VGA cards, then we need to arbitrate access 598 * to the common VGA resources. 599 * 600 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), 601 * then we do not take part in VGA arbitration and the 602 * vga_client_register() fails with -ENODEV. 
603 */ 604 #if 0 605 ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); 606 if (ret && ret != -ENODEV) 607 goto out; 608 609 intel_register_dsm_handler(); 610 611 ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); 612 if (ret) 613 goto cleanup_vga_client; 614 #endif 615 616 /* must happen before intel_power_domains_init_hw() on VLV/CHV */ 617 intel_update_rawclk(dev_priv); 618 619 intel_power_domains_init_hw(dev_priv, false); 620 621 intel_csr_ucode_init(dev_priv); 622 623 ret = intel_irq_install(dev_priv); 624 if (ret) 625 goto cleanup_csr; 626 627 intel_setup_gmbus(dev_priv); 628 629 /* Important: The output setup functions called by modeset_init need 630 * working irqs for e.g. gmbus and dp aux transfers. */ 631 ret = intel_modeset_init(dev); 632 if (ret) 633 goto cleanup_irq; 634 635 intel_uc_init_fw(dev_priv); 636 637 ret = i915_gem_init(dev_priv); 638 if (ret) 639 goto cleanup_uc; 640 641 intel_modeset_gem_init(dev); 642 643 if (INTEL_INFO(dev_priv)->num_pipes == 0) 644 return 0; 645 646 ret = intel_fbdev_init(dev); 647 if (ret) 648 goto cleanup_gem; 649 650 /* Only enable hotplug handling once the fbdev is fully set up. */ 651 intel_hpd_init(dev_priv); 652 653 drm_kms_helper_poll_init(dev); 654 655 #ifdef __DragonFly__ 656 /* 657 * If we are dealing with dual GPU machines the vga_switcheroo module 658 * has been loaded. Machines with dual GPUs have an integrated graphics 659 * device (IGD), which we assume is an Intel device. The other, the 660 * discrete device (DIS), is either an NVidia or a Radeon device. For 661 * now we will force switch the gmux so the intel driver outputs 662 * both to the laptop panel and the external monitor. 663 * 664 * DragonFly does not have an nvidia native driver yet. In the future, 665 * we will check for the radeon device: if present, we will leave 666 * the gmux switch as it is, so the user can choose between the IGD and 667 * the DIS using the /dev/vga_switcheroo device. 
668 */ 669 if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { 670 ret = vga_switcheroo_force_migd(); 671 if (ret) { 672 DRM_INFO("could not switch gmux to IGD\n"); 673 } 674 } 675 #endif 676 677 return 0; 678 679 cleanup_gem: 680 if (i915_gem_suspend(dev_priv)) 681 DRM_ERROR("failed to idle hardware; continuing to unload!\n"); 682 i915_gem_fini(dev_priv); 683 cleanup_uc: 684 intel_uc_fini_fw(dev_priv); 685 cleanup_irq: 686 drm_irq_uninstall(dev); 687 intel_teardown_gmbus(dev_priv); 688 cleanup_csr: 689 intel_csr_ucode_fini(dev_priv); 690 intel_power_domains_fini(dev_priv); 691 #if 0 692 vga_switcheroo_unregister_client(pdev); 693 cleanup_vga_client: 694 vga_client_register(pdev, NULL, NULL, NULL); 695 out: 696 #endif 697 return ret; 698 } 699 700 #ifdef __DragonFly__ 701 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 702 { 703 return 0; 704 } 705 #else 706 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 707 { 708 struct apertures_struct *ap; 709 struct pci_dev *pdev = dev_priv->drm.pdev; 710 struct i915_ggtt *ggtt = &dev_priv->ggtt; 711 bool primary; 712 int ret; 713 714 ap = alloc_apertures(1); 715 if (!ap) 716 return -ENOMEM; 717 718 ap->ranges[0].base = ggtt->mappable_base; 719 ap->ranges[0].size = ggtt->mappable_end; 720 721 primary = 722 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 723 724 ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); 725 726 kfree(ap); 727 728 return ret; 729 } 730 #endif 731 732 #if !defined(CONFIG_VGA_CONSOLE) 733 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 734 { 735 return 0; 736 } 737 #elif !defined(CONFIG_DUMMY_CONSOLE) 738 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 739 { 740 return -ENODEV; 741 } 742 #else 743 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 744 { 745 int ret = 0; 746 747 DRM_INFO("Replacing VGA console driver\n"); 748 749 
console_lock(); 750 if (con_is_bound(&vga_con)) 751 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 752 if (ret == 0) { 753 ret = do_unregister_con_driver(&vga_con); 754 755 /* Ignore "already unregistered". */ 756 if (ret == -ENODEV) 757 ret = 0; 758 } 759 console_unlock(); 760 761 return ret; 762 } 763 #endif 764 765 static void intel_init_dpio(struct drm_i915_private *dev_priv) 766 { 767 /* 768 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 769 * CHV x1 PHY (DP/HDMI D) 770 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 771 */ 772 if (IS_CHERRYVIEW(dev_priv)) { 773 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 774 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 775 } else if (IS_VALLEYVIEW(dev_priv)) { 776 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 777 } 778 } 779 780 static int i915_workqueues_init(struct drm_i915_private *dev_priv) 781 { 782 /* 783 * The i915 workqueue is primarily used for batched retirement of 784 * requests (and thus managing bo) once the task has been completed 785 * by the GPU. i915_gem_retire_requests() is called directly when we 786 * need high-priority retirement, such as waiting for an explicit 787 * bo. 788 * 789 * It is also used for periodic low-priority events, such as 790 * idle-timers and recording error state. 791 * 792 * All tasks on the workqueue are expected to acquire the dev mutex 793 * so there is no point in running more than one instance of the 794 * workqueue at any time. Use an ordered one. 
795 */ 796 dev_priv->wq = alloc_ordered_workqueue("i915", 0); 797 if (dev_priv->wq == NULL) 798 goto out_err; 799 800 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); 801 if (dev_priv->hotplug.dp_wq == NULL) 802 goto out_free_wq; 803 804 return 0; 805 806 out_free_wq: 807 destroy_workqueue(dev_priv->wq); 808 out_err: 809 DRM_ERROR("Failed to allocate workqueues.\n"); 810 811 return -ENOMEM; 812 } 813 814 static void i915_engines_cleanup(struct drm_i915_private *i915) 815 { 816 struct intel_engine_cs *engine; 817 enum intel_engine_id id; 818 819 for_each_engine(engine, i915, id) 820 kfree(engine); 821 } 822 823 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) 824 { 825 destroy_workqueue(dev_priv->hotplug.dp_wq); 826 destroy_workqueue(dev_priv->wq); 827 } 828 829 /* 830 * We don't keep the workarounds for pre-production hardware, so we expect our 831 * driver to fail on these machines in one way or another. A little warning on 832 * dmesg may help both the user and the bug triagers. 833 */ 834 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) 835 { 836 bool pre = false; 837 838 pre |= IS_HSW_EARLY_SDV(dev_priv); 839 pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); 840 pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); 841 842 if (pre) { 843 DRM_ERROR("This is a pre-production stepping. " 844 "It may not be fully functional.\n"); 845 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); 846 } 847 } 848 849 /** 850 * i915_driver_init_early - setup state not requiring device access 851 * @dev_priv: device private 852 * 853 * Initialize everything that is a "SW-only" state, that is state not 854 * requiring accessing the device or exposing the driver via kernel internal 855 * or userspace interfaces. Example steps belonging here: lock initialization, 856 * system memory allocation, setting up device specific attributes and 857 * function hooks not requiring accessing the device. 
858 */ 859 static int i915_driver_init_early(struct drm_i915_private *dev_priv, 860 const struct pci_device_id *ent) 861 { 862 const struct intel_device_info *match_info = 863 (struct intel_device_info *)ent->driver_data; 864 struct intel_device_info *device_info; 865 int ret = 0; 866 867 if (i915_inject_load_failure()) 868 return -ENODEV; 869 870 /* Setup the write-once "constant" device info */ 871 device_info = mkwrite_device_info(dev_priv); 872 memcpy(device_info, match_info, sizeof(*device_info)); 873 device_info->device_id = dev_priv->drm.pdev->device; 874 875 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); 876 device_info->gen_mask = BIT(device_info->gen - 1); 877 878 lockinit(&dev_priv->irq_lock, "userirq", 0, 0); 879 lockinit(&dev_priv->gpu_error.lock, "915err", 0, 0); 880 lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE); 881 lockinit(&dev_priv->uncore.lock, "915gt", 0, 0); 882 883 lockinit(&dev_priv->mm.object_stat_lock, "i915osl", 0, 0); 884 lockinit(&dev_priv->mmio_flip_lock, "i915mfl", 0, 0); 885 lockinit(&dev_priv->sb_lock, "i915sbl", 0, LK_CANRECURSE); 886 lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE); 887 lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE); 888 lockinit(&dev_priv->wm.wm_mutex, "i915wm", 0, LK_CANRECURSE); 889 lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE); 890 891 intel_uc_init_early(dev_priv); 892 i915_memcpy_init_early(dev_priv); 893 894 ret = intel_engines_init_early(dev_priv); 895 if (ret) 896 return ret; 897 898 ret = i915_workqueues_init(dev_priv); 899 if (ret < 0) 900 goto err_engines; 901 902 /* This must be called before any calls to HAS_PCH_* */ 903 intel_detect_pch(dev_priv); 904 905 intel_pm_setup(dev_priv); 906 intel_init_dpio(dev_priv); 907 intel_power_domains_init(dev_priv); 908 intel_irq_init(dev_priv); 909 intel_hangcheck_init(dev_priv); 910 intel_init_display_hooks(dev_priv); 911 intel_init_clock_gating_hooks(dev_priv); 912 
intel_init_audio_hooks(dev_priv); 913 ret = i915_gem_load_init(dev_priv); 914 if (ret < 0) 915 goto err_workqueues; 916 917 intel_display_crc_init(dev_priv); 918 919 intel_device_info_dump(dev_priv); 920 921 intel_detect_preproduction_hw(dev_priv); 922 923 i915_perf_init(dev_priv); 924 925 return 0; 926 927 err_workqueues: 928 i915_workqueues_cleanup(dev_priv); 929 err_engines: 930 i915_engines_cleanup(dev_priv); 931 return ret; 932 } 933 934 /** 935 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() 936 * @dev_priv: device private 937 */ 938 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) 939 { 940 i915_perf_fini(dev_priv); 941 i915_gem_load_cleanup(dev_priv); 942 i915_workqueues_cleanup(dev_priv); 943 i915_engines_cleanup(dev_priv); 944 } 945 946 static int i915_mmio_setup(struct drm_i915_private *dev_priv) 947 { 948 struct pci_dev *pdev = dev_priv->drm.pdev; 949 int mmio_bar; 950 int mmio_size; 951 952 mmio_bar = IS_GEN2(dev_priv) ? 1 : 0; 953 /* 954 * Before gen4, the registers and the GTT are behind different BARs. 955 * However, from gen4 onwards, the registers and the GTT are shared 956 * in the same BAR, so we want to restrict this ioremap from 957 * clobbering the GTT which we want ioremap_wc instead. Fortunately, 958 * the register BAR remains the same size for all the earlier 959 * generations up to Ironlake. 
960 */ 961 if (INTEL_GEN(dev_priv) < 5) 962 mmio_size = 512 * 1024; 963 else 964 mmio_size = 2 * 1024 * 1024; 965 dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size); 966 if (dev_priv->regs == NULL) { 967 DRM_ERROR("failed to map registers\n"); 968 969 return -EIO; 970 } 971 972 /* Try to make sure MCHBAR is enabled before poking at it */ 973 intel_setup_mchbar(dev_priv); 974 975 return 0; 976 } 977 978 static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) 979 { 980 #if 0 981 struct pci_dev *pdev = dev_priv->drm.pdev; 982 #endif 983 984 intel_teardown_mchbar(dev_priv); 985 #if 0 986 pci_iounmap(pdev, dev_priv->regs); 987 #endif 988 } 989 990 /** 991 * i915_driver_init_mmio - setup device MMIO 992 * @dev_priv: device private 993 * 994 * Setup minimal device state necessary for MMIO accesses later in the 995 * initialization sequence. The setup here should avoid any other device-wide 996 * side effects or exposing the driver via kernel internal or user space 997 * interfaces. 
998 */ 999 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) 1000 { 1001 int ret; 1002 1003 if (i915_inject_load_failure()) 1004 return -ENODEV; 1005 1006 if (i915_get_bridge_dev(dev_priv)) 1007 return -EIO; 1008 1009 ret = i915_mmio_setup(dev_priv); 1010 if (ret < 0) 1011 goto put_bridge; 1012 1013 intel_uncore_init(dev_priv); 1014 i915_gem_init_mmio(dev_priv); 1015 1016 return 0; 1017 1018 put_bridge: 1019 pci_dev_put(dev_priv->bridge_dev); 1020 1021 return ret; 1022 } 1023 1024 /** 1025 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() 1026 * @dev_priv: device private 1027 */ 1028 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) 1029 { 1030 intel_uncore_fini(dev_priv); 1031 i915_mmio_cleanup(dev_priv); 1032 pci_dev_put(dev_priv->bridge_dev); 1033 } 1034 1035 static void intel_sanitize_options(struct drm_i915_private *dev_priv) 1036 { 1037 i915.enable_execlists = 1038 intel_sanitize_enable_execlists(dev_priv, 1039 i915.enable_execlists); 1040 1041 /* 1042 * i915.enable_ppgtt is read-only, so do an early pass to validate the 1043 * user's requested state against the hardware/driver capabilities. We 1044 * do this now so that we can print out any log messages once rather 1045 * than every time we check intel_enable_ppgtt(). 1046 */ 1047 i915.enable_ppgtt = 1048 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); 1049 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 1050 1051 i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); 1052 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores)); 1053 1054 intel_uc_sanitize_options(dev_priv); 1055 } 1056 1057 /** 1058 * i915_driver_init_hw - setup state requiring device access 1059 * @dev_priv: device private 1060 * 1061 * Setup state that requires accessing the device, but doesn't require 1062 * exposing the driver via kernel internal or userspace interfaces. 
1063 */ 1064 static int i915_driver_init_hw(struct drm_i915_private *dev_priv) 1065 { 1066 struct pci_dev *pdev = dev_priv->drm.pdev; 1067 int ret; 1068 1069 if (i915_inject_load_failure()) 1070 return -ENODEV; 1071 1072 intel_device_info_runtime_init(dev_priv); 1073 1074 intel_sanitize_options(dev_priv); 1075 1076 ret = i915_ggtt_probe_hw(dev_priv); 1077 if (ret) 1078 return ret; 1079 1080 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1081 * otherwise the vga fbdev driver falls over. */ 1082 ret = i915_kick_out_firmware_fb(dev_priv); 1083 if (ret) { 1084 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1085 goto out_ggtt; 1086 } 1087 1088 ret = i915_kick_out_vgacon(dev_priv); 1089 if (ret) { 1090 DRM_ERROR("failed to remove conflicting VGA console\n"); 1091 goto out_ggtt; 1092 } 1093 1094 ret = i915_ggtt_init_hw(dev_priv); 1095 if (ret) 1096 return ret; 1097 1098 ret = i915_ggtt_enable_hw(dev_priv); 1099 if (ret) { 1100 DRM_ERROR("failed to enable GGTT\n"); 1101 goto out_ggtt; 1102 } 1103 1104 pci_set_master(pdev); 1105 1106 #if 0 1107 /* overlay on gen2 is broken and can't address above 1G */ 1108 if (IS_GEN2(dev_priv)) { 1109 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); 1110 if (ret) { 1111 DRM_ERROR("failed to set DMA mask\n"); 1112 1113 goto out_ggtt; 1114 } 1115 } 1116 1117 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1118 * using 32bit addressing, overwriting memory if HWS is located 1119 * above 4GB. 1120 * 1121 * The documentation also mentions an issue with undefined 1122 * behaviour if any general state is accessed within a page above 4GB, 1123 * which also needs to be handled carefully. 
1124 */ 1125 if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { 1126 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1127 1128 if (ret) { 1129 DRM_ERROR("failed to set DMA mask\n"); 1130 1131 goto out_ggtt; 1132 } 1133 } 1134 #endif 1135 1136 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1137 PM_QOS_DEFAULT_VALUE); 1138 1139 intel_uncore_sanitize(dev_priv); 1140 1141 intel_opregion_setup(dev_priv); 1142 1143 i915_gem_load_init_fences(dev_priv); 1144 1145 /* On the 945G/GM, the chipset reports the MSI capability on the 1146 * integrated graphics even though the support isn't actually there 1147 * according to the published specs. It doesn't appear to function 1148 * correctly in testing on 945G. 1149 * This may be a side effect of MSI having been made available for PEG 1150 * and the registers being closely associated. 1151 * 1152 * According to chipset errata, on the 965GM, MSI interrupts may 1153 * be lost or delayed, and was defeatured. MSI interrupts seem to 1154 * get lost on g4x as well, and interrupt delivery seems to stay 1155 * properly dead afterwards. So we'll just disable them for all 1156 * pre-gen5 chipsets. 
1157 */ 1158 if (INTEL_GEN(dev_priv) >= 5) { 1159 #if 0 1160 if (pci_enable_msi(pdev) < 0) 1161 DRM_DEBUG_DRIVER("can't enable MSI"); 1162 #endif 1163 } 1164 1165 return 0; 1166 1167 ret = intel_gvt_init(dev_priv); 1168 if (ret) 1169 goto out_ggtt; 1170 1171 out_ggtt: 1172 i915_ggtt_cleanup_hw(dev_priv); 1173 1174 return ret; 1175 } 1176 1177 /** 1178 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() 1179 * @dev_priv: device private 1180 */ 1181 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) 1182 { 1183 #if 0 1184 struct pci_dev *pdev = dev_priv->drm.pdev; 1185 1186 if (pdev->msi_enabled) 1187 pci_disable_msi(pdev); 1188 #endif 1189 1190 pm_qos_remove_request(&dev_priv->pm_qos); 1191 i915_ggtt_cleanup_hw(dev_priv); 1192 } 1193 1194 /** 1195 * i915_driver_register - register the driver with the rest of the system 1196 * @dev_priv: device private 1197 * 1198 * Perform any steps necessary to make the driver available via kernel 1199 * internal or userspace interfaces. 1200 */ 1201 static void i915_driver_register(struct drm_i915_private *dev_priv) 1202 { 1203 struct drm_device *dev = &dev_priv->drm; 1204 1205 i915_gem_shrinker_init(dev_priv); 1206 1207 /* 1208 * Notify a valid surface after modesetting, 1209 * when running inside a VM. 
1210 */ 1211 if (intel_vgpu_active(dev_priv)) 1212 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1213 1214 /* Reveal our presence to userspace */ 1215 if (drm_dev_register(dev, 0) == 0) { 1216 i915_debugfs_register(dev_priv); 1217 i915_guc_log_register(dev_priv); 1218 i915_setup_sysfs(dev_priv); 1219 1220 /* Depends on sysfs having been initialized */ 1221 i915_perf_register(dev_priv); 1222 } else 1223 DRM_ERROR("Failed to register driver for userspace access!\n"); 1224 1225 if (INTEL_INFO(dev_priv)->num_pipes) { 1226 /* Must be done after probing outputs */ 1227 intel_opregion_register(dev_priv); 1228 acpi_video_register(); 1229 } 1230 1231 if (IS_GEN5(dev_priv)) 1232 intel_gpu_ips_init(dev_priv); 1233 1234 intel_audio_init(dev_priv); 1235 1236 /* 1237 * Some ports require correctly set-up hpd registers for detection to 1238 * work properly (leading to ghost connected connector status), e.g. VGA 1239 * on gm45. Hence we can only set up the initial fbdev config after hpd 1240 * irqs are fully enabled. We do it last so that the async config 1241 * cannot run before the connectors are registered. 
1242 */ 1243 intel_fbdev_initial_config_async(dev); 1244 } 1245 1246 /** 1247 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() 1248 * @dev_priv: device private 1249 */ 1250 static void i915_driver_unregister(struct drm_i915_private *dev_priv) 1251 { 1252 intel_audio_deinit(dev_priv); 1253 1254 intel_gpu_ips_teardown(); 1255 acpi_video_unregister(); 1256 intel_opregion_unregister(dev_priv); 1257 1258 i915_perf_unregister(dev_priv); 1259 1260 i915_teardown_sysfs(dev_priv); 1261 i915_guc_log_unregister(dev_priv); 1262 drm_dev_unregister(&dev_priv->drm); 1263 1264 i915_gem_shrinker_cleanup(dev_priv); 1265 } 1266 1267 /** 1268 * i915_driver_load - setup chip and create an initial config 1269 * @pdev: PCI device 1270 * @ent: matching PCI ID entry 1271 * 1272 * The driver load routine has to do several things: 1273 * - drive output discovery via intel_modeset_init() 1274 * - initialize the memory manager 1275 * - allocate initial config memory 1276 * - setup the DRM framebuffer with the allocated memory 1277 */ 1278 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) 1279 { 1280 const struct intel_device_info *match_info = 1281 (struct intel_device_info *)ent->driver_data; 1282 struct drm_i915_private *dev_priv; 1283 int ret; 1284 1285 /* Enable nuclear pageflip on ILK+, except vlv/chv */ 1286 if (!i915.nuclear_pageflip && 1287 (match_info->gen < 5 || match_info->has_gmch_display)) 1288 driver.driver_features &= ~DRIVER_ATOMIC; 1289 1290 ret = -ENOMEM; 1291 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 1292 if (dev_priv) 1293 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); 1294 if (ret) { 1295 DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); 1296 goto out_free; 1297 } 1298 1299 dev_priv->drm.pdev = pdev; 1300 dev_priv->drm.dev_private = dev_priv; 1301 1302 #if 0 1303 ret = pci_enable_device(pdev); 1304 if (ret) 1305 goto out_fini; 1306 #endif 1307 1308 pci_set_drvdata(pdev, &dev_priv->drm); 1309 /* 
1310 * Disable the system suspend direct complete optimization, which can 1311 * leave the device suspended skipping the driver's suspend handlers 1312 * if the device was already runtime suspended. This is needed due to 1313 * the difference in our runtime and system suspend sequence and 1314 * becaue the HDA driver may require us to enable the audio power 1315 * domain during system suspend. 1316 */ 1317 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 1318 1319 ret = i915_driver_init_early(dev_priv, ent); 1320 if (ret < 0) 1321 goto out_pci_disable; 1322 1323 intel_runtime_pm_get(dev_priv); 1324 1325 ret = i915_driver_init_mmio(dev_priv); 1326 if (ret < 0) 1327 goto out_runtime_pm_put; 1328 1329 ret = i915_driver_init_hw(dev_priv); 1330 if (ret < 0) 1331 goto out_cleanup_mmio; 1332 1333 /* 1334 * TODO: move the vblank init and parts of modeset init steps into one 1335 * of the i915_driver_init_/i915_driver_register functions according 1336 * to the role/effect of the given init step. 1337 */ 1338 if (INTEL_INFO(dev_priv)->num_pipes) { 1339 ret = drm_vblank_init(&dev_priv->drm, 1340 INTEL_INFO(dev_priv)->num_pipes); 1341 if (ret) 1342 goto out_cleanup_hw; 1343 } 1344 1345 ret = i915_load_modeset_init(&dev_priv->drm); 1346 if (ret < 0) 1347 goto out_cleanup_vblank; 1348 1349 i915_driver_register(dev_priv); 1350 1351 intel_runtime_pm_enable(dev_priv); 1352 1353 dev_priv->ipc_enabled = false; 1354 1355 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1356 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1357 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 1358 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); 1359 1360 intel_runtime_pm_put(dev_priv); 1361 1362 return 0; 1363 1364 out_cleanup_vblank: 1365 drm_vblank_cleanup(&dev_priv->drm); 1366 out_cleanup_hw: 1367 i915_driver_cleanup_hw(dev_priv); 1368 out_cleanup_mmio: 1369 i915_driver_cleanup_mmio(dev_priv); 1370 out_runtime_pm_put: 1371 intel_runtime_pm_put(dev_priv); 1372 i915_driver_cleanup_early(dev_priv); 1373 out_pci_disable: 1374 #if 0 1375 
pci_disable_device(pdev); 1376 out_fini: 1377 #endif 1378 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); 1379 drm_dev_fini(&dev_priv->drm); 1380 out_free: 1381 kfree(dev_priv); 1382 return ret; 1383 } 1384 1385 void i915_driver_unload(struct drm_device *dev) 1386 { 1387 struct drm_i915_private *dev_priv = to_i915(dev); 1388 struct drm_modeset_acquire_ctx ctx; 1389 int ret; 1390 1391 intel_fbdev_fini(dev); 1392 1393 if (i915_gem_suspend(dev_priv)) 1394 DRM_ERROR("failed to idle hardware; continuing to unload!\n"); 1395 1396 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 1397 1398 drm_modeset_acquire_init(&ctx, 0); 1399 while (1) { 1400 ret = drm_modeset_lock_all_ctx(dev, &ctx); 1401 if (!ret) 1402 ret = drm_atomic_helper_disable_all(dev, &ctx); 1403 1404 if (ret != -EDEADLK) 1405 break; 1406 1407 drm_modeset_backoff(&ctx); 1408 } 1409 1410 if (ret) 1411 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 1412 1413 drm_modeset_drop_locks(&ctx); 1414 drm_modeset_acquire_fini(&ctx); 1415 1416 intel_gvt_cleanup(dev_priv); 1417 1418 i915_driver_unregister(dev_priv); 1419 1420 drm_vblank_cleanup(dev); 1421 1422 intel_modeset_cleanup(dev); 1423 1424 /* 1425 * free the memory space allocated for the child device 1426 * config parsed from VBT 1427 */ 1428 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { 1429 kfree(dev_priv->vbt.child_dev); 1430 dev_priv->vbt.child_dev = NULL; 1431 dev_priv->vbt.child_dev_num = 0; 1432 } 1433 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); 1434 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; 1435 kfree(dev_priv->vbt.lfp_lvds_vbt_mode); 1436 dev_priv->vbt.lfp_lvds_vbt_mode = NULL; 1437 1438 #if 0 1439 vga_switcheroo_unregister_client(pdev); 1440 vga_client_register(pdev, NULL, NULL, NULL); 1441 #endif 1442 1443 intel_csr_ucode_fini(dev_priv); 1444 1445 /* Free error state after interrupts are fully disabled. 
*/ 1446 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 1447 i915_reset_error_state(dev_priv); 1448 1449 /* Flush any outstanding unpin_work. */ 1450 drain_workqueue(dev_priv->wq); 1451 1452 i915_gem_fini(dev_priv); 1453 intel_uc_fini_fw(dev_priv); 1454 intel_fbc_cleanup_cfb(dev_priv); 1455 1456 intel_power_domains_fini(dev_priv); 1457 1458 i915_driver_cleanup_hw(dev_priv); 1459 i915_driver_cleanup_mmio(dev_priv); 1460 1461 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 1462 } 1463 1464 static void i915_driver_release(struct drm_device *dev) 1465 { 1466 struct drm_i915_private *dev_priv = to_i915(dev); 1467 1468 i915_driver_cleanup_early(dev_priv); 1469 drm_dev_fini(&dev_priv->drm); 1470 1471 kfree(dev_priv); 1472 } 1473 1474 static int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1475 { 1476 int ret; 1477 1478 ret = i915_gem_open(dev, file); 1479 if (ret) 1480 return ret; 1481 1482 return 0; 1483 } 1484 1485 /** 1486 * i915_driver_lastclose - clean up after all DRM clients have exited 1487 * @dev: DRM device 1488 * 1489 * Take care of cleaning up after all DRM clients have exited. In the 1490 * mode setting case, we want to restore the kernel's initial mode (just 1491 * in case the last client left us in a bad state). 1492 * 1493 * Additionally, in the non-mode setting case, we'll tear down the GTT 1494 * and DMA structures, since the kernel won't be using them, and clea 1495 * up any GEM state. 
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
#if 0
	vga_switcheroo_process_delayed_switch();
#endif
}

/* Per-file cleanup: release this client's GEM contexts and requests. */
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

#if 0
/* Give each encoder a chance to quiesce (e.g. cancel deferred work). */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

/* True when the target sleep state is shallower than S3 (suspend-to-idle). */
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	error = i915_gem_suspend(dev_priv);
	if (error) {
		dev_err(&pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	/* Tell the opregion/ACPI which D-state we are headed to. */
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_suspend(dev_priv);
	intel_opregion_unregister(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	intel_display_set_init_power(dev_priv, false);

	fw_csr = !IS_GEN9_LP(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_GEN9_LP(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/* vga_switcheroo entry point: full suspend (suspend + suspend_late). */
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev_priv)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(dev_priv);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	intel_autoenable_gt_powersave(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	if (IS_GEN9_LP(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	i915_gem_sanitize(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

out:
	dev_priv->suspended_to_idle = false;

	return ret;
}

/* vga_switcheroo entry point: full resume (resume_early + resume). */
static int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}
#endif

/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private to reset
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Caller must hold the struct_mutex.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

	/* Only the thread holding the reset handoff may proceed. */
	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
		return;

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!i915_gem_unset_wedged(dev_priv))
		goto wakeup;

	error->reset_count++;

	pr_notice("drm/i915: Resetting chip after gpu hang\n");
	disable_irq(dev_priv->drm.irq);
	ret = i915_gem_reset_prepare(dev_priv);
	if (ret) {
		DRM_ERROR("GPU recovery failed\n");
		intel_gpu_reset(dev_priv, ALL_ENGINES);
		goto error;
	}

	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	i915_gem_reset(dev_priv);
	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	i915_queue_hangcheck(dev_priv);

finish:
	i915_gem_reset_finish(dev_priv);
	enable_irq(dev_priv->drm.irq);

wakeup:
	/* Release the handoff so waiters blocked on the reset can continue. */
	clear_bit(I915_RESET_HANDOFF, &error->flags);
	wake_up_bit(&error->flags, I915_RESET_HANDOFF);
	return;

error:
	i915_gem_set_wedged(dev_priv);
	goto finish;
}

#if 0
static int i915_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(dev);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	int ret;

	ret = i915_pm_suspend(kdev);
	if (ret)
		return ret;

	ret = i915_gem_freeze(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	int ret;

	ret = i915_pm_suspend_late(kdev);
	if (ret)
		return ret;

	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image.
 */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
2111 */ 2112 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2113 { 2114 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2115 int i; 2116 2117 /* GAM 0x4000-0x4770 */ 2118 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); 2119 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); 2120 s->arb_mode = I915_READ(ARB_MODE); 2121 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); 2122 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 2123 2124 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2125 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); 2126 2127 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 2128 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 2129 2130 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); 2131 s->ecochk = I915_READ(GAM_ECOCHK); 2132 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); 2133 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); 2134 2135 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); 2136 2137 /* MBC 0x9024-0x91D0, 0x8500 */ 2138 s->g3dctl = I915_READ(VLV_G3DCTL); 2139 s->gsckgctl = I915_READ(VLV_GSCKGCTL); 2140 s->mbctl = I915_READ(GEN6_MBCTL); 2141 2142 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2143 s->ucgctl1 = I915_READ(GEN6_UCGCTL1); 2144 s->ucgctl3 = I915_READ(GEN6_UCGCTL3); 2145 s->rcgctl1 = I915_READ(GEN6_RCGCTL1); 2146 s->rcgctl2 = I915_READ(GEN6_RCGCTL2); 2147 s->rstctl = I915_READ(GEN6_RSTCTL); 2148 s->misccpctl = I915_READ(GEN7_MISCCPCTL); 2149 2150 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2151 s->gfxpause = I915_READ(GEN6_GFXPAUSE); 2152 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); 2153 s->rpdeuc = I915_READ(GEN6_RPDEUC); 2154 s->ecobus = I915_READ(ECOBUS); 2155 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); 2156 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); 2157 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); 2158 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); 2159 s->rcedata = I915_READ(VLV_RCEDATA); 2160 s->spare2gh = I915_READ(VLV_SPAREG2H); 2161 2162 /* Display CZ domain, 0x4400C-0x4402C, 
0x4F000-0x4F11F */ 2163 s->gt_imr = I915_READ(GTIMR); 2164 s->gt_ier = I915_READ(GTIER); 2165 s->pm_imr = I915_READ(GEN6_PMIMR); 2166 s->pm_ier = I915_READ(GEN6_PMIER); 2167 2168 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2169 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); 2170 2171 /* GT SA CZ domain, 0x100000-0x138124 */ 2172 s->tilectl = I915_READ(TILECTL); 2173 s->gt_fifoctl = I915_READ(GTFIFOCTL); 2174 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); 2175 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2176 s->pmwgicz = I915_READ(VLV_PMWGICZ); 2177 2178 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2179 s->gu_ctl0 = I915_READ(VLV_GU_CTL0); 2180 s->gu_ctl1 = I915_READ(VLV_GU_CTL1); 2181 s->pcbr = I915_READ(VLV_PCBR); 2182 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); 2183 2184 /* 2185 * Not saving any of: 2186 * DFT, 0x9800-0x9EC0 2187 * SARB, 0xB000-0xB1FC 2188 * GAC, 0x5208-0x524C, 0x14000-0x14C000 2189 * PCI CFG 2190 */ 2191 } 2192 2193 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2194 { 2195 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2196 u32 val; 2197 int i; 2198 2199 /* GAM 0x4000-0x4770 */ 2200 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); 2201 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); 2202 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); 2203 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); 2204 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 2205 2206 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2207 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); 2208 2209 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); 2210 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); 2211 2212 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); 2213 I915_WRITE(GAM_ECOCHK, s->ecochk); 2214 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); 2215 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); 2216 2217 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); 2218 2219 /* MBC 0x9024-0x91D0, 
0x8500 */ 2220 I915_WRITE(VLV_G3DCTL, s->g3dctl); 2221 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); 2222 I915_WRITE(GEN6_MBCTL, s->mbctl); 2223 2224 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2225 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); 2226 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); 2227 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); 2228 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); 2229 I915_WRITE(GEN6_RSTCTL, s->rstctl); 2230 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); 2231 2232 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2233 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); 2234 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); 2235 I915_WRITE(GEN6_RPDEUC, s->rpdeuc); 2236 I915_WRITE(ECOBUS, s->ecobus); 2237 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); 2238 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); 2239 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); 2240 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); 2241 I915_WRITE(VLV_RCEDATA, s->rcedata); 2242 I915_WRITE(VLV_SPAREG2H, s->spare2gh); 2243 2244 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 2245 I915_WRITE(GTIMR, s->gt_imr); 2246 I915_WRITE(GTIER, s->gt_ier); 2247 I915_WRITE(GEN6_PMIMR, s->pm_imr); 2248 I915_WRITE(GEN6_PMIER, s->pm_ier); 2249 2250 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2251 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); 2252 2253 /* GT SA CZ domain, 0x100000-0x138124 */ 2254 I915_WRITE(TILECTL, s->tilectl); 2255 I915_WRITE(GTFIFOCTL, s->gt_fifoctl); 2256 /* 2257 * Preserve the GT allow wake and GFX force clock bit, they are not 2258 * be restored, as they are used to control the s0ix suspend/resume 2259 * sequence by the caller. 
2260 */ 2261 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2262 val &= VLV_GTLC_ALLOWWAKEREQ; 2263 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; 2264 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2265 2266 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2267 val &= VLV_GFX_CLK_FORCE_ON_BIT; 2268 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; 2269 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2270 2271 I915_WRITE(VLV_PMWGICZ, s->pmwgicz); 2272 2273 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2274 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); 2275 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); 2276 I915_WRITE(VLV_PCBR, s->pcbr); 2277 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); 2278 } 2279 2280 static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, 2281 u32 mask, u32 val) 2282 { 2283 /* The HW does not like us polling for PW_STATUS frequently, so 2284 * use the sleeping loop rather than risk the busy spin within 2285 * intel_wait_for_register(). 2286 * 2287 * Transitioning between RC6 states should be at most 2ms (see 2288 * valleyview_enable_rps) so use a 3ms timeout. 
2289 */ 2290 return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, 2291 3); 2292 } 2293 #endif 2294 2295 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) 2296 { 2297 u32 val; 2298 int err; 2299 2300 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2301 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 2302 if (force_on) 2303 val |= VLV_GFX_CLK_FORCE_ON_BIT; 2304 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2305 2306 if (!force_on) 2307 return 0; 2308 2309 err = intel_wait_for_register(dev_priv, 2310 VLV_GTLC_SURVIVABILITY_REG, 2311 VLV_GFX_CLK_STATUS_BIT, 2312 VLV_GFX_CLK_STATUS_BIT, 2313 20); 2314 if (err) 2315 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 2316 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 2317 2318 return err; 2319 } 2320 2321 #if 0 2322 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 2323 { 2324 u32 mask; 2325 u32 val; 2326 int err; 2327 2328 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2329 val &= ~VLV_GTLC_ALLOWWAKEREQ; 2330 if (allow) 2331 val |= VLV_GTLC_ALLOWWAKEREQ; 2332 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2333 POSTING_READ(VLV_GTLC_WAKE_CTRL); 2334 2335 mask = VLV_GTLC_ALLOWWAKEACK; 2336 val = allow ? mask : 0; 2337 2338 err = vlv_wait_for_pw_status(dev_priv, mask, val); 2339 if (err) 2340 DRM_ERROR("timeout disabling GT waking\n"); 2341 2342 return err; 2343 } 2344 2345 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 2346 bool wait_for_on) 2347 { 2348 u32 mask; 2349 u32 val; 2350 2351 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 2352 val = wait_for_on ? mask : 0; 2353 2354 /* 2355 * RC6 transitioning can be delayed up to 2 msec (see 2356 * valleyview_enable_rps), use 3 msec for safety. 
2357 */ 2358 if (vlv_wait_for_pw_status(dev_priv, mask, val)) 2359 DRM_ERROR("timeout waiting for GT wells to go %s\n", 2360 onoff(wait_for_on)); 2361 } 2362 2363 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 2364 { 2365 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) 2366 return; 2367 2368 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); 2369 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 2370 } 2371 2372 static int vlv_suspend_complete(struct drm_i915_private *dev_priv) 2373 { 2374 u32 mask; 2375 int err; 2376 2377 /* 2378 * Bspec defines the following GT well on flags as debug only, so 2379 * don't treat them as hard failures. 2380 */ 2381 vlv_wait_for_gt_wells(dev_priv, false); 2382 2383 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; 2384 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); 2385 2386 vlv_check_no_gt_access(dev_priv); 2387 2388 err = vlv_force_gfx_clock(dev_priv, true); 2389 if (err) 2390 goto err1; 2391 2392 err = vlv_allow_gt_wake(dev_priv, false); 2393 if (err) 2394 goto err2; 2395 2396 if (!IS_CHERRYVIEW(dev_priv)) 2397 vlv_save_gunit_s0ix_state(dev_priv); 2398 2399 err = vlv_force_gfx_clock(dev_priv, false); 2400 if (err) 2401 goto err2; 2402 2403 return 0; 2404 2405 err2: 2406 /* For safety always re-enable waking and disable gfx clock forcing */ 2407 vlv_allow_gt_wake(dev_priv, true); 2408 err1: 2409 vlv_force_gfx_clock(dev_priv, false); 2410 2411 return err; 2412 } 2413 2414 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 2415 bool rpm_resume) 2416 { 2417 int err; 2418 int ret; 2419 2420 /* 2421 * If any of the steps fail just try to continue, that's the best we 2422 * can do at this point. Return the first error code (which will also 2423 * leave RPM permanently disabled). 
2424 */ 2425 ret = vlv_force_gfx_clock(dev_priv, true); 2426 2427 if (!IS_CHERRYVIEW(dev_priv)) 2428 vlv_restore_gunit_s0ix_state(dev_priv); 2429 2430 err = vlv_allow_gt_wake(dev_priv, true); 2431 if (!ret) 2432 ret = err; 2433 2434 err = vlv_force_gfx_clock(dev_priv, false); 2435 if (!ret) 2436 ret = err; 2437 2438 vlv_check_no_gt_access(dev_priv); 2439 2440 if (rpm_resume) 2441 intel_init_clock_gating(dev_priv); 2442 2443 return ret; 2444 } 2445 2446 static int intel_runtime_suspend(struct device *kdev) 2447 { 2448 struct pci_dev *pdev = to_pci_dev(kdev); 2449 struct drm_device *dev = pci_get_drvdata(pdev); 2450 struct drm_i915_private *dev_priv = to_i915(dev); 2451 int ret; 2452 2453 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) 2454 return -ENODEV; 2455 2456 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) 2457 return -ENODEV; 2458 2459 DRM_DEBUG_KMS("Suspending device\n"); 2460 2461 disable_rpm_wakeref_asserts(dev_priv); 2462 2463 /* 2464 * We are safe here against re-faults, since the fault handler takes 2465 * an RPM reference. 
2466 */ 2467 i915_gem_runtime_suspend(dev_priv); 2468 2469 intel_guc_suspend(dev_priv); 2470 2471 intel_runtime_pm_disable_interrupts(dev_priv); 2472 2473 ret = 0; 2474 if (IS_GEN9_LP(dev_priv)) { 2475 bxt_display_core_uninit(dev_priv); 2476 bxt_enable_dc9(dev_priv); 2477 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2478 hsw_enable_pc8(dev_priv); 2479 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2480 ret = vlv_suspend_complete(dev_priv); 2481 } 2482 2483 if (ret) { 2484 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 2485 intel_runtime_pm_enable_interrupts(dev_priv); 2486 2487 enable_rpm_wakeref_asserts(dev_priv); 2488 2489 return ret; 2490 } 2491 2492 intel_uncore_suspend(dev_priv); 2493 2494 enable_rpm_wakeref_asserts(dev_priv); 2495 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2496 2497 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 2498 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 2499 2500 dev_priv->pm.suspended = true; 2501 2502 /* 2503 * FIXME: We really should find a document that references the arguments 2504 * used below! 2505 */ 2506 if (IS_BROADWELL(dev_priv)) { 2507 /* 2508 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 2509 * being detected, and the call we do at intel_runtime_resume() 2510 * won't be able to restore them. Since PCI_D3hot matches the 2511 * actual specification and appears to be working, use it. 2512 */ 2513 intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 2514 } else { 2515 /* 2516 * current versions of firmware which depend on this opregion 2517 * notification have repurposed the D1 definition to mean 2518 * "runtime suspended" vs. what you would normally expect (D3) 2519 * to distinguish it from notifications that might be sent via 2520 * the suspend path. 
2521 */ 2522 intel_opregion_notify_adapter(dev_priv, PCI_D1); 2523 } 2524 2525 assert_forcewakes_inactive(dev_priv); 2526 2527 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2528 intel_hpd_poll_init(dev_priv); 2529 2530 DRM_DEBUG_KMS("Device suspended\n"); 2531 return 0; 2532 } 2533 2534 static int intel_runtime_resume(struct device *kdev) 2535 { 2536 struct pci_dev *pdev = to_pci_dev(kdev); 2537 struct drm_device *dev = pci_get_drvdata(pdev); 2538 struct drm_i915_private *dev_priv = to_i915(dev); 2539 int ret = 0; 2540 2541 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) 2542 return -ENODEV; 2543 2544 DRM_DEBUG_KMS("Resuming device\n"); 2545 2546 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2547 disable_rpm_wakeref_asserts(dev_priv); 2548 2549 intel_opregion_notify_adapter(dev_priv, PCI_D0); 2550 dev_priv->pm.suspended = false; 2551 if (intel_uncore_unclaimed_mmio(dev_priv)) 2552 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 2553 2554 intel_guc_resume(dev_priv); 2555 2556 if (IS_GEN6(dev_priv)) 2557 intel_init_pch_refclk(dev_priv); 2558 2559 if (IS_GEN9_LP(dev_priv)) { 2560 bxt_disable_dc9(dev_priv); 2561 bxt_display_core_init(dev_priv, true); 2562 if (dev_priv->csr.dmc_payload && 2563 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2564 gen9_enable_dc5(dev_priv); 2565 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2566 hsw_disable_pc8(dev_priv); 2567 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2568 ret = vlv_resume_prepare(dev_priv, true); 2569 } 2570 2571 /* 2572 * No point of rolling back things in case of an error, as the best 2573 * we can do is to hope that things will still work (and disable RPM). 2574 */ 2575 i915_gem_init_swizzling(dev_priv); 2576 i915_gem_restore_fences(dev_priv); 2577 2578 intel_runtime_pm_enable_interrupts(dev_priv); 2579 2580 /* 2581 * On VLV/CHV display interrupts are part of the display 2582 * power well, so hpd is reinitialized from there. 
For 2583 * everyone else do it here. 2584 */ 2585 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2586 intel_hpd_init(dev_priv); 2587 2588 enable_rpm_wakeref_asserts(dev_priv); 2589 2590 if (ret) 2591 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 2592 else 2593 DRM_DEBUG_KMS("Device resumed\n"); 2594 2595 return ret; 2596 } 2597 2598 const struct dev_pm_ops i915_pm_ops = { 2599 /* 2600 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2601 * PMSG_RESUME] 2602 */ 2603 .suspend = i915_pm_suspend, 2604 .suspend_late = i915_pm_suspend_late, 2605 .resume_early = i915_pm_resume_early, 2606 .resume = i915_pm_resume, 2607 2608 /* 2609 * S4 event handlers 2610 * @freeze, @freeze_late : called (1) before creating the 2611 * hibernation image [PMSG_FREEZE] and 2612 * (2) after rebooting, before restoring 2613 * the image [PMSG_QUIESCE] 2614 * @thaw, @thaw_early : called (1) after creating the hibernation 2615 * image, before writing it [PMSG_THAW] 2616 * and (2) after failing to create or 2617 * restore the image [PMSG_RECOVER] 2618 * @poweroff, @poweroff_late: called after writing the hibernation 2619 * image, before rebooting [PMSG_HIBERNATE] 2620 * @restore, @restore_early : called after rebooting and restoring the 2621 * hibernation image [PMSG_RESTORE] 2622 */ 2623 .freeze = i915_pm_freeze, 2624 .freeze_late = i915_pm_freeze_late, 2625 .thaw_early = i915_pm_thaw_early, 2626 .thaw = i915_pm_thaw, 2627 .poweroff = i915_pm_suspend, 2628 .poweroff_late = i915_pm_poweroff_late, 2629 .restore_early = i915_pm_restore_early, 2630 .restore = i915_pm_restore, 2631 2632 /* S0ix (via runtime suspend) event handlers */ 2633 .runtime_suspend = intel_runtime_suspend, 2634 .runtime_resume = intel_runtime_resume, 2635 }; 2636 2637 static const struct vm_operations_struct i915_gem_vm_ops = { 2638 .fault = i915_gem_fault, 2639 .open = drm_gem_vm_open, 2640 .close = drm_gem_vm_close, 2641 }; 2642 #endif 2643 2644 static struct cdev_pager_ops 
i915_gem_vm_ops = { 2645 .cdev_pg_fault = i915_gem_fault, 2646 .cdev_pg_ctor = i915_gem_pager_ctor, 2647 .cdev_pg_dtor = i915_gem_pager_dtor 2648 }; 2649 2650 static const struct file_operations i915_driver_fops = { 2651 .owner = THIS_MODULE, 2652 #if 0 2653 .open = drm_open, 2654 .release = drm_release, 2655 .unlocked_ioctl = drm_ioctl, 2656 .mmap = drm_gem_mmap, 2657 .poll = drm_poll, 2658 .read = drm_read, 2659 .compat_ioctl = i915_compat_ioctl, 2660 .llseek = noop_llseek, 2661 #endif 2662 }; 2663 2664 static int 2665 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 2666 struct drm_file *file) 2667 { 2668 return -ENODEV; 2669 } 2670 2671 static const struct drm_ioctl_desc i915_ioctls[] = { 2672 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2673 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 2674 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 2675 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 2676 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 2677 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 2678 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 2679 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2680 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2681 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2682 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2683 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 2684 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2685 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2686 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 2687 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 2688 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2689 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2690 
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 2691 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), 2692 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2693 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2694 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2695 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 2696 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 2697 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2698 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2699 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2700 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 2701 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 2702 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 2703 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 2704 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), 2705 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 2706 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 2707 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), 2708 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), 2709 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 2710 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 2711 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 2712 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, 
DRM_MASTER|DRM_CONTROL_ALLOW), 2713 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2714 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 2715 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 2716 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2717 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 2718 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 2719 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 2720 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 2721 #if 0 2722 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 2723 #endif 2724 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 2725 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 2726 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), 2727 }; 2728 2729 static int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 2730 struct sysctl_oid *top) 2731 { 2732 return drm_add_busid_modesetting(dev, ctx, top); 2733 } 2734 2735 static struct drm_driver driver = { 2736 /* Don't use MTRRs here; the Xserver or userspace app should 2737 * deal with them for Intel hardware. 
2738 */ 2739 .driver_features = 2740 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 2741 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC, 2742 .release = i915_driver_release, 2743 .open = i915_driver_open, 2744 .lastclose = i915_driver_lastclose, 2745 .postclose = i915_driver_postclose, 2746 .set_busid = drm_pci_set_busid, 2747 2748 .gem_close_object = i915_gem_close_object, 2749 .gem_free_object_unlocked = i915_gem_free_object, 2750 .gem_vm_ops = &i915_gem_vm_ops, 2751 2752 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 2753 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 2754 .gem_prime_export = i915_gem_prime_export, 2755 .gem_prime_import = i915_gem_prime_import, 2756 2757 .dumb_create = i915_gem_dumb_create, 2758 .dumb_map_offset = i915_gem_mmap_gtt, 2759 .dumb_destroy = drm_gem_dumb_destroy, 2760 .ioctls = i915_ioctls, 2761 .num_ioctls = ARRAY_SIZE(i915_ioctls), 2762 .fops = &i915_driver_fops, 2763 .name = DRIVER_NAME, 2764 .desc = DRIVER_DESC, 2765 .date = DRIVER_DATE, 2766 .major = DRIVER_MAJOR, 2767 .minor = DRIVER_MINOR, 2768 .patchlevel = DRIVER_PATCHLEVEL, 2769 #ifdef __DragonFly__ 2770 2771 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2772 #include "selftests/mock_drm.c" 2773 #endif 2774 .sysctl_init = i915_sysctl_init, 2775 #endif 2776 }; 2777